Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
openanolis
dragonwell8_hotspot
提交
cb5b0fb9
D
dragonwell8_hotspot
项目概览
openanolis
/
dragonwell8_hotspot
通知
2
Star
2
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
D
dragonwell8_hotspot
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
cb5b0fb9
编写于
7月 01, 2010
作者:
J
jcoomes
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
6957084: simplify TaskQueue overflow handling
Reviewed-by: ysr, jmasa
上级
5416cce2
变更
11
隐藏空白更改
内联
并排
Showing
11 changed file
with
202 addition
and
449 deletion
+202
-449
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
+1
-1
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
...c_implementation/parallelScavenge/psCompactionManager.cpp
+12
-137
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
...c_implementation/parallelScavenge/psCompactionManager.hpp
+18
-51
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
...mentation/parallelScavenge/psCompactionManager.inline.hpp
+12
-3
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
.../gc_implementation/parallelScavenge/psParallelCompact.cpp
+2
-2
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
.../gc_implementation/parallelScavenge/psParallelCompact.hpp
+3
-6
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
...gc_implementation/parallelScavenge/psPromotionManager.cpp
+29
-86
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
...gc_implementation/parallelScavenge/psPromotionManager.hpp
+23
-40
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
...hare/vm/gc_implementation/parallelScavenge/psScavenge.cpp
+0
-1
src/share/vm/utilities/taskqueue.cpp
src/share/vm/utilities/taskqueue.cpp
+1
-71
src/share/vm/utilities/taskqueue.hpp
src/share/vm/utilities/taskqueue.hpp
+101
-51
未找到文件。
src/share/vm/gc_implementation/includeDB_gc_parallelScavenge
浏览文件 @
cb5b0fb9
...
...
@@ -270,7 +270,7 @@ psParallelCompact.cpp parallelScavengeHeap.inline.hpp
psParallelCompact.cpp pcTasks.hpp
psParallelCompact.cpp psMarkSweep.hpp
psParallelCompact.cpp psMarkSweepDecorator.hpp
psParallelCompact.cpp psCompactionManager.hpp
psParallelCompact.cpp psCompactionManager.
inline.
hpp
psParallelCompact.cpp psPromotionManager.inline.hpp
psParallelCompact.cpp psOldGen.hpp
psParallelCompact.cpp psParallelCompact.hpp
...
...
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2005, 20
09
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -32,7 +32,7 @@ ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager
::
_objarray_queues
=
NULL
;
ObjectStartArray
*
ParCompactionManager
::
_start_array
=
NULL
;
ParMarkBitMap
*
ParCompactionManager
::
_mark_bitmap
=
NULL
;
RegionTaskQueueSet
*
ParCompactionManager
::
_region_array
=
NULL
;
RegionTaskQueueSet
*
ParCompactionManager
::
_region_array
=
NULL
;
ParCompactionManager
::
ParCompactionManager
()
:
_action
(
CopyAndUpdate
)
{
...
...
@@ -43,25 +43,9 @@ ParCompactionManager::ParCompactionManager() :
_old_gen
=
heap
->
old_gen
();
_start_array
=
old_gen
()
->
start_array
();
marking_stack
()
->
initialize
();
// We want the overflow stack to be permanent
_overflow_stack
=
new
(
ResourceObj
::
C_HEAP
)
GrowableArray
<
oop
>
(
10
,
true
);
_objarray_queue
.
initialize
();
_objarray_overflow_stack
=
new
(
ResourceObj
::
C_HEAP
)
ObjArrayOverflowStack
(
10
,
true
);
#ifdef USE_RegionTaskQueueWithOverflow
_objarray_stack
.
initialize
();
region_stack
()
->
initialize
();
#else
region_stack
()
->
initialize
();
// We want the overflow stack to be permanent
_region_overflow_stack
=
new
(
ResourceObj
::
C_HEAP
)
GrowableArray
<
size_t
>
(
10
,
true
);
#endif
// Note that _revisit_klass_stack is allocated out of the
// C heap (as opposed to out of ResourceArena).
...
...
@@ -71,12 +55,9 @@ ParCompactionManager::ParCompactionManager() :
// From some experiments (#klass/k)^2 for k = 10 seems a better fit, but this will
// have to do for now until we are able to investigate a more optimal setting.
_revisit_mdo_stack
=
new
(
ResourceObj
::
C_HEAP
)
GrowableArray
<
DataLayout
*>
(
size
*
2
,
true
);
}
ParCompactionManager
::~
ParCompactionManager
()
{
delete
_overflow_stack
;
delete
_objarray_overflow_stack
;
delete
_revisit_klass_stack
;
delete
_revisit_mdo_stack
;
// _manager_array and _stack_array are statics
...
...
@@ -108,12 +89,8 @@ void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
_manager_array
[
i
]
=
new
ParCompactionManager
();
guarantee
(
_manager_array
[
i
]
!=
NULL
,
"Could not create ParCompactionManager"
);
stack_array
()
->
register_queue
(
i
,
_manager_array
[
i
]
->
marking_stack
());
_objarray_queues
->
register_queue
(
i
,
&
_manager_array
[
i
]
->
_objarray_queue
);
#ifdef USE_RegionTaskQueueWithOverflow
region_array
()
->
register_queue
(
i
,
_manager_array
[
i
]
->
region_stack
()
->
task_queue
());
#else
_objarray_queues
->
register_queue
(
i
,
&
_manager_array
[
i
]
->
_objarray_stack
);
region_array
()
->
register_queue
(
i
,
_manager_array
[
i
]
->
region_stack
());
#endif
}
// The VMThread gets its own ParCompactionManager, which is not available
...
...
@@ -149,57 +126,6 @@ bool ParCompactionManager::should_reset_only() {
return
action
()
==
ParCompactionManager
::
ResetObjects
;
}
// For now save on a stack
void
ParCompactionManager
::
save_for_scanning
(
oop
m
)
{
stack_push
(
m
);
}
void
ParCompactionManager
::
stack_push
(
oop
obj
)
{
if
(
!
marking_stack
()
->
push
(
obj
))
{
overflow_stack
()
->
push
(
obj
);
}
}
oop
ParCompactionManager
::
retrieve_for_scanning
()
{
// Should not be used in the parallel case
ShouldNotReachHere
();
return
NULL
;
}
// Save region on a stack
void
ParCompactionManager
::
save_for_processing
(
size_t
region_index
)
{
#ifdef ASSERT
const
ParallelCompactData
&
sd
=
PSParallelCompact
::
summary_data
();
ParallelCompactData
::
RegionData
*
const
region_ptr
=
sd
.
region
(
region_index
);
assert
(
region_ptr
->
claimed
(),
"must be claimed"
);
assert
(
region_ptr
->
_pushed
++
==
0
,
"should only be pushed once"
);
#endif
region_stack_push
(
region_index
);
}
void
ParCompactionManager
::
region_stack_push
(
size_t
region_index
)
{
#ifdef USE_RegionTaskQueueWithOverflow
region_stack
()
->
save
(
region_index
);
#else
if
(
!
region_stack
()
->
push
(
region_index
))
{
region_overflow_stack
()
->
push
(
region_index
);
}
#endif
}
bool
ParCompactionManager
::
retrieve_for_processing
(
size_t
&
region_index
)
{
#ifdef USE_RegionTaskQueueWithOverflow
return
region_stack
()
->
retrieve
(
region_index
);
#else
// Should not be used in the parallel case
ShouldNotReachHere
();
return
false
;
#endif
}
ParCompactionManager
*
ParCompactionManager
::
gc_thread_compaction_manager
(
int
index
)
{
assert
(
index
>=
0
&&
index
<
(
int
)
ParallelGCThreads
,
"index out of range"
);
...
...
@@ -218,8 +144,8 @@ void ParCompactionManager::follow_marking_stacks() {
do
{
// Drain the overflow stack first, to allow stealing from the marking stack.
oop
obj
;
while
(
!
overflow_stack
()
->
is_empty
(
))
{
o
verflow_stack
()
->
pop
()
->
follow_contents
(
this
);
while
(
marking_stack
()
->
pop_overflow
(
obj
))
{
o
bj
->
follow_contents
(
this
);
}
while
(
marking_stack
()
->
pop_local
(
obj
))
{
obj
->
follow_contents
(
this
);
...
...
@@ -227,11 +153,10 @@ void ParCompactionManager::follow_marking_stacks() {
// Process ObjArrays one at a time to avoid marking stack bloat.
ObjArrayTask
task
;
if
(
!
_objarray_overflow_stack
->
is_empty
())
{
task
=
_objarray_overflow_stack
->
pop
();
if
(
_objarray_stack
.
pop_overflow
(
task
))
{
objArrayKlass
*
const
k
=
(
objArrayKlass
*
)
task
.
obj
()
->
blueprint
();
k
->
oop_follow_contents
(
this
,
task
.
obj
(),
task
.
index
());
}
else
if
(
_objarray_
queue
.
pop_local
(
task
))
{
}
else
if
(
_objarray_
stack
.
pop_local
(
task
))
{
objArrayKlass
*
const
k
=
(
objArrayKlass
*
)
task
.
obj
()
->
blueprint
();
k
->
oop_follow_contents
(
this
,
task
.
obj
(),
task
.
index
());
}
...
...
@@ -240,68 +165,18 @@ void ParCompactionManager::follow_marking_stacks() {
assert
(
marking_stacks_empty
(),
"Sanity"
);
}
void
ParCompactionManager
::
drain_region_overflow_stack
()
{
size_t
region_index
=
(
size_t
)
-
1
;
while
(
region_stack
()
->
retrieve_from_overflow
(
region_index
))
{
PSParallelCompact
::
fill_and_update_region
(
this
,
region_index
);
}
}
void
ParCompactionManager
::
drain_region_stacks
()
{
#ifdef ASSERT
ParallelScavengeHeap
*
heap
=
(
ParallelScavengeHeap
*
)
Universe
::
heap
();
assert
(
heap
->
kind
()
==
CollectedHeap
::
ParallelScavengeHeap
,
"Sanity"
);
MutableSpace
*
to_space
=
heap
->
young_gen
()
->
to_space
();
MutableSpace
*
old_space
=
heap
->
old_gen
()
->
object_space
();
MutableSpace
*
perm_space
=
heap
->
perm_gen
()
->
object_space
();
#endif
/* ASSERT */
#if 1 // def DO_PARALLEL - the serial code hasn't been updated
do
{
#ifdef USE_RegionTaskQueueWithOverflow
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
size_t
region_index
=
(
size_t
)
-
1
;
while
(
region_stack
()
->
retrieve_from_overflow
(
region_index
))
{
// Drain overflow stack first so other threads can steal.
size_t
region_index
;
while
(
region_stack
()
->
pop_overflow
(
region_index
))
{
PSParallelCompact
::
fill_and_update_region
(
this
,
region_index
);
}
while
(
region_stack
()
->
retrieve_from_stealable_queue
(
region_index
))
{
PSParallelCompact
::
fill_and_update_region
(
this
,
region_index
);
}
}
while
(
!
region_stack
()
->
is_empty
());
#else
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while
(
!
region_overflow_stack
()
->
is_empty
())
{
size_t
region_index
=
region_overflow_stack
()
->
pop
();
PSParallelCompact
::
fill_and_update_region
(
this
,
region_index
);
}
size_t
region_index
=
-
1
;
// obj is a reference!!!
while
(
region_stack
()
->
pop_local
(
region_index
))
{
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
PSParallelCompact
::
fill_and_update_region
(
this
,
region_index
);
}
}
while
((
region_stack
()
->
size
()
!=
0
)
||
(
region_overflow_stack
()
->
length
()
!=
0
));
#endif
#ifdef USE_RegionTaskQueueWithOverflow
assert
(
region_stack
()
->
is_empty
(),
"Sanity"
);
#else
assert
(
region_stack
()
->
size
()
==
0
,
"Sanity"
);
assert
(
region_overflow_stack
()
->
length
()
==
0
,
"Sanity"
);
#endif
#else
oop
obj
;
while
(
obj
=
retrieve_for_scanning
())
{
obj
->
follow_contents
(
this
);
}
#endif
}
while
(
!
region_stack
()
->
is_empty
());
}
#ifdef ASSERT
...
...
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2005, 20
09
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -59,10 +59,10 @@ class ParCompactionManager : public CHeapObj {
private:
// 32-bit: 4K * 8 = 32KiB; 64-bit: 8K * 16 = 128KiB
#define
OBJARRAY_
QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
typedef
GenericTaskQueue
<
ObjArrayTask
,
OBJARRAY_
QUEUE_SIZE
>
ObjArrayTaskQueue
;
typedef
GenericTaskQueueSet
<
ObjArrayTaskQueue
>
ObjArrayTaskQueueSet
;
#undef
OBJARRAY_
QUEUE_SIZE
#define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
typedef
OverflowTaskQueue
<
ObjArrayTask
,
QUEUE_SIZE
>
ObjArrayTaskQueue
;
typedef
GenericTaskQueueSet
<
ObjArrayTaskQueue
>
ObjArrayTaskQueueSet
;
#undef QUEUE_SIZE
static
ParCompactionManager
**
_manager_array
;
static
OopTaskQueueSet
*
_stack_array
;
...
...
@@ -72,23 +72,13 @@ class ParCompactionManager : public CHeapObj {
static
PSOldGen
*
_old_gen
;
private:
OopTaskQueue
_marking_stack
;
GrowableArray
<
oop
>*
_overflow_stack
;
typedef
GrowableArray
<
ObjArrayTask
>
ObjArrayOverflowStack
;
ObjArrayTaskQueue
_objarray_queue
;
ObjArrayOverflowStack
*
_objarray_overflow_stack
;
OverflowTaskQueue
<
oop
>
_marking_stack
;
ObjArrayTaskQueue
_objarray_stack
;
// Is there a way to reuse the _marking_stack for the
// saving empty regions? For now just create a different
// type of TaskQueue.
#ifdef USE_RegionTaskQueueWithOverflow
RegionTaskQueueWithOverflow
_region_stack
;
#else
RegionTaskQueue
_region_stack
;
GrowableArray
<
size_t
>*
_region_overflow_stack
;
#endif
#if 1 // does this happen enough to need a per thread stack?
GrowableArray
<
Klass
*>*
_revisit_klass_stack
;
...
...
@@ -107,16 +97,8 @@ private:
protected:
// Array of tasks. Needed by the ParallelTaskTerminator.
static
RegionTaskQueueSet
*
region_array
()
{
return
_region_array
;
}
OopTaskQueue
*
marking_stack
()
{
return
&
_marking_stack
;
}
GrowableArray
<
oop
>*
overflow_stack
()
{
return
_overflow_stack
;
}
#ifdef USE_RegionTaskQueueWithOverflow
RegionTaskQueueWithOverflow
*
region_stack
()
{
return
&
_region_stack
;
}
#else
RegionTaskQueue
*
region_stack
()
{
return
&
_region_stack
;
}
GrowableArray
<
size_t
>*
region_overflow_stack
()
{
return
_region_overflow_stack
;
}
#endif
OverflowTaskQueue
<
oop
>*
marking_stack
()
{
return
&
_marking_stack
;
}
RegionTaskQueue
*
region_stack
()
{
return
&
_region_stack
;
}
// Pushes onto the marking stack. If the marking stack is full,
// pushes onto the overflow stack.
...
...
@@ -124,11 +106,7 @@ private:
// Do not implement an equivalent stack_pop. Deal with the
// marking stack and overflow stack directly.
// Pushes onto the region stack. If the region stack is full,
// pushes onto the region overflow stack.
void
region_stack_push
(
size_t
region_index
);
public:
public:
Action
action
()
{
return
_action
;
}
void
set_action
(
Action
v
)
{
_action
=
v
;
}
...
...
@@ -157,22 +135,15 @@ public:
GrowableArray
<
DataLayout
*>*
revisit_mdo_stack
()
{
return
_revisit_mdo_stack
;
}
#endif
// Save oop for later processing. Must not fail.
void
save_for_scanning
(
oop
m
);
// Get a oop for scanning. If returns null, no oop were found.
oop
retrieve_for_scanning
();
inline
void
push_objarray
(
oop
obj
,
size_t
index
);
// Save region for later processing. Must not fail.
void
save_for_processing
(
size_t
region_index
);
// Get a region for processing. If returns null, no region were found.
bool
retrieve_for_processing
(
size_t
&
region_index
);
// Save for later processing. Must not fail.
inline
void
push
(
oop
obj
)
{
_marking_stack
.
push
(
obj
);
}
inline
void
push_objarray
(
oop
objarray
,
size_t
index
);
inline
void
push_region
(
size_t
index
);
// Access function for compaction managers
static
ParCompactionManager
*
gc_thread_compaction_manager
(
int
index
);
static
bool
steal
(
int
queue_num
,
int
*
seed
,
Task
&
t
)
{
static
bool
steal
(
int
queue_num
,
int
*
seed
,
oop
&
t
)
{
return
stack_array
()
->
steal
(
queue_num
,
seed
,
t
);
}
...
...
@@ -180,8 +151,8 @@ public:
return
_objarray_queues
->
steal
(
queue_num
,
seed
,
t
);
}
static
bool
steal
(
int
queue_num
,
int
*
seed
,
RegionTask
&
t
)
{
return
region_array
()
->
steal
(
queue_num
,
seed
,
t
);
static
bool
steal
(
int
queue_num
,
int
*
seed
,
size_t
&
region
)
{
return
region_array
()
->
steal
(
queue_num
,
seed
,
region
);
}
// Process tasks remaining on any marking stack
...
...
@@ -191,9 +162,6 @@ public:
// Process tasks remaining on any stack
void
drain_region_stacks
();
// Process tasks remaining on any stack
void
drain_region_overflow_stack
();
// Debugging support
#ifdef ASSERT
bool
stacks_have_been_allocated
();
...
...
@@ -208,6 +176,5 @@ inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
}
bool
ParCompactionManager
::
marking_stacks_empty
()
const
{
return
_marking_stack
.
size
()
==
0
&&
_overflow_stack
->
is_empty
()
&&
_objarray_queue
.
size
()
==
0
&&
_objarray_overflow_stack
->
is_empty
();
return
_marking_stack
.
is_empty
()
&&
_objarray_stack
.
is_empty
();
}
src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
浏览文件 @
cb5b0fb9
...
...
@@ -26,7 +26,16 @@ void ParCompactionManager::push_objarray(oop obj, size_t index)
{
ObjArrayTask
task
(
obj
,
index
);
assert
(
task
.
is_valid
(),
"bad ObjArrayTask"
);
if
(
!
_objarray_queue
.
push
(
task
))
{
_objarray_overflow_stack
->
push
(
task
);
}
_objarray_stack
.
push
(
task
);
}
void
ParCompactionManager
::
push_region
(
size_t
index
)
{
#ifdef ASSERT
const
ParallelCompactData
&
sd
=
PSParallelCompact
::
summary_data
();
ParallelCompactData
::
RegionData
*
const
region_ptr
=
sd
.
region
(
index
);
assert
(
region_ptr
->
claimed
(),
"must be claimed"
);
assert
(
region_ptr
->
_pushed
++
==
0
,
"should only be pushed once"
);
#endif
region_stack
()
->
push
(
index
);
}
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
浏览文件 @
cb5b0fb9
...
...
@@ -2474,7 +2474,7 @@ void PSParallelCompact::enqueue_region_draining_tasks(GCTaskQueue* q,
for
(
size_t
cur
=
end_region
-
1
;
cur
>=
beg_region
;
--
cur
)
{
if
(
sd
.
region
(
cur
)
->
claim_unsafe
())
{
ParCompactionManager
*
cm
=
ParCompactionManager
::
manager_array
(
which
);
cm
->
save_for_processing
(
cur
);
cm
->
push_region
(
cur
);
if
(
TraceParallelOldGCCompactionPhase
&&
Verbose
)
{
const
size_t
count_mod_8
=
fillable_regions
&
7
;
...
...
@@ -3138,7 +3138,7 @@ void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
assert
(
cur
->
data_size
()
>
0
,
"region must have live data"
);
cur
->
decrement_destination_count
();
if
(
cur
<
enqueue_end
&&
cur
->
available
()
&&
cur
->
claim
())
{
cm
->
save_for_processing
(
sd
.
region
(
cur
));
cm
->
push_region
(
sd
.
region
(
cur
));
}
}
}
...
...
src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2005, 20
09
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -1297,11 +1297,8 @@ inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
T
heap_oop
=
oopDesc
::
load_heap_oop
(
p
);
if
(
!
oopDesc
::
is_null
(
heap_oop
))
{
oop
obj
=
oopDesc
::
decode_heap_oop_not_null
(
heap_oop
);
if
(
mark_bitmap
()
->
is_unmarked
(
obj
))
{
if
(
mark_obj
(
obj
))
{
// This thread marked the object and owns the subsequent processing of it.
cm
->
save_for_scanning
(
obj
);
}
if
(
mark_bitmap
()
->
is_unmarked
(
obj
)
&&
mark_obj
(
obj
))
{
cm
->
push
(
obj
);
}
}
}
...
...
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2002, 20
09
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -94,45 +94,13 @@ void PSPromotionManager::post_scavenge() {
print_stats
();
#endif // PS_PM_STATS
for
(
uint
i
=
0
;
i
<
ParallelGCThreads
+
1
;
i
++
)
{
for
(
uint
i
=
0
;
i
<
ParallelGCThreads
+
1
;
i
++
)
{
PSPromotionManager
*
manager
=
manager_array
(
i
);
// the guarantees are a bit gratuitous but, if one fires, we'll
// have a better idea of what went wrong
if
(
i
<
ParallelGCThreads
)
{
guarantee
((
!
UseDepthFirstScavengeOrder
||
manager
->
overflow_stack_depth
()
->
length
()
<=
0
),
"promotion manager overflow stack must be empty"
);
guarantee
((
UseDepthFirstScavengeOrder
||
manager
->
overflow_stack_breadth
()
->
length
()
<=
0
),
"promotion manager overflow stack must be empty"
);
guarantee
((
!
UseDepthFirstScavengeOrder
||
manager
->
claimed_stack_depth
()
->
size
()
<=
0
),
"promotion manager claimed stack must be empty"
);
guarantee
((
UseDepthFirstScavengeOrder
||
manager
->
claimed_stack_breadth
()
->
size
()
<=
0
),
"promotion manager claimed stack must be empty"
);
if
(
UseDepthFirstScavengeOrder
)
{
assert
(
manager
->
claimed_stack_depth
()
->
is_empty
(),
"should be empty"
);
}
else
{
guarantee
((
!
UseDepthFirstScavengeOrder
||
manager
->
overflow_stack_depth
()
->
length
()
<=
0
),
"VM Thread promotion manager overflow stack "
"must be empty"
);
guarantee
((
UseDepthFirstScavengeOrder
||
manager
->
overflow_stack_breadth
()
->
length
()
<=
0
),
"VM Thread promotion manager overflow stack "
"must be empty"
);
guarantee
((
!
UseDepthFirstScavengeOrder
||
manager
->
claimed_stack_depth
()
->
size
()
<=
0
),
"VM Thread promotion manager claimed stack "
"must be empty"
);
guarantee
((
UseDepthFirstScavengeOrder
||
manager
->
claimed_stack_breadth
()
->
size
()
<=
0
),
"VM Thread promotion manager claimed stack "
"must be empty"
);
assert
(
manager
->
claimed_stack_breadth
()
->
is_empty
(),
"should be empty"
);
}
manager
->
flush_labs
();
}
}
...
...
@@ -181,15 +149,9 @@ PSPromotionManager::PSPromotionManager() {
if
(
depth_first
())
{
claimed_stack_depth
()
->
initialize
();
queue_size
=
claimed_stack_depth
()
->
max_elems
();
// We want the overflow stack to be permanent
_overflow_stack_depth
=
new
(
ResourceObj
::
C_HEAP
)
GrowableArray
<
StarTask
>
(
10
,
true
);
_overflow_stack_breadth
=
NULL
;
}
else
{
claimed_stack_breadth
()
->
initialize
();
queue_size
=
claimed_stack_breadth
()
->
max_elems
();
// We want the overflow stack to be permanent
_overflow_stack_breadth
=
new
(
ResourceObj
::
C_HEAP
)
GrowableArray
<
oop
>
(
10
,
true
);
_overflow_stack_depth
=
NULL
;
}
_totally_drain
=
(
ParallelGCThreads
==
1
)
||
(
GCDrainStackTargetSize
==
0
);
...
...
@@ -209,8 +171,7 @@ PSPromotionManager::PSPromotionManager() {
}
void
PSPromotionManager
::
reset
()
{
assert
(
claimed_stack_empty
(),
"reset of non-empty claimed stack"
);
assert
(
overflow_stack_empty
(),
"reset of non-empty overflow stack"
);
assert
(
stacks_empty
(),
"reset of non-empty stack"
);
// We need to get an assert in here to make sure the labs are always flushed.
...
...
@@ -243,7 +204,7 @@ void PSPromotionManager::reset() {
void
PSPromotionManager
::
drain_stacks_depth
(
bool
totally_drain
)
{
assert
(
depth_first
(),
"invariant"
);
assert
(
overflow_stack_depth
()
!=
NULL
,
"invariant"
);
assert
(
claimed_stack_depth
()
->
overflow_stack
()
!=
NULL
,
"invariant"
);
totally_drain
=
totally_drain
||
_totally_drain
;
#ifdef ASSERT
...
...
@@ -254,41 +215,35 @@ void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
MutableSpace
*
perm_space
=
heap
->
perm_gen
()
->
object_space
();
#endif
/* ASSERT */
OopStarTaskQueue
*
const
tq
=
claimed_stack_depth
();
do
{
StarTask
p
;
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while
(
!
overflow_stack_depth
()
->
is_empty
())
{
// linux compiler wants different overloaded operator= in taskqueue to
// assign to p that the other compilers don't like.
StarTask
ptr
=
overflow_stack_depth
()
->
pop
();
process_popped_location_depth
(
ptr
);
while
(
tq
->
pop_overflow
(
p
))
{
process_popped_location_depth
(
p
);
}
if
(
totally_drain
)
{
while
(
claimed_stack_depth
()
->
pop_local
(
p
))
{
while
(
tq
->
pop_local
(
p
))
{
process_popped_location_depth
(
p
);
}
}
else
{
while
(
claimed_stack_depth
()
->
size
()
>
_target_stack_size
&&
claimed_stack_depth
()
->
pop_local
(
p
))
{
while
(
tq
->
size
()
>
_target_stack_size
&&
tq
->
pop_local
(
p
))
{
process_popped_location_depth
(
p
);
}
}
}
while
(
(
totally_drain
&&
claimed_stack_depth
()
->
size
()
>
0
)
||
(
overflow_stack_depth
()
->
length
()
>
0
)
);
assert
(
!
totally_drain
||
claimed_stack_empty
(),
"Sanity"
);
assert
(
totally_drain
||
claimed_stack_depth
()
->
size
()
<=
_target_stack_size
,
"Sanity"
);
assert
(
overflow_stack_empty
(),
"Sanity"
);
}
while
(
totally_drain
&&
!
tq
->
taskqueue_empty
()
||
!
tq
->
overflow_empty
());
assert
(
!
totally_drain
||
tq
->
taskqueue_empty
(),
"Sanity"
);
assert
(
totally_drain
||
tq
->
size
()
<=
_target_stack_size
,
"Sanity"
);
assert
(
tq
->
overflow_empty
(),
"Sanity"
);
}
void
PSPromotionManager
::
drain_stacks_breadth
(
bool
totally_drain
)
{
assert
(
!
depth_first
(),
"invariant"
);
assert
(
overflow_stack_breadth
()
!=
NULL
,
"invariant"
);
assert
(
claimed_stack_breadth
()
->
overflow_stack
()
!=
NULL
,
"invariant"
);
totally_drain
=
totally_drain
||
_totally_drain
;
#ifdef ASSERT
...
...
@@ -299,51 +254,39 @@ void PSPromotionManager::drain_stacks_breadth(bool totally_drain) {
MutableSpace
*
perm_space
=
heap
->
perm_gen
()
->
object_space
();
#endif
/* ASSERT */
OverflowTaskQueue
<
oop
>*
const
tq
=
claimed_stack_breadth
();
do
{
oop
obj
;
// Drain overflow stack first, so other threads can steal from
// claimed stack while we work.
while
(
!
overflow_stack_breadth
()
->
is_empty
())
{
obj
=
overflow_stack_breadth
()
->
pop
();
while
(
tq
->
pop_overflow
(
obj
))
{
obj
->
copy_contents
(
this
);
}
if
(
totally_drain
)
{
// obj is a reference!!!
while
(
claimed_stack_breadth
()
->
pop_local
(
obj
))
{
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
while
(
tq
->
pop_local
(
obj
))
{
obj
->
copy_contents
(
this
);
}
}
else
{
// obj is a reference!!!
while
(
claimed_stack_breadth
()
->
size
()
>
_target_stack_size
&&
claimed_stack_breadth
()
->
pop_local
(
obj
))
{
// It would be nice to assert about the type of objects we might
// pop, but they can come from anywhere, unfortunately.
while
(
tq
->
size
()
>
_target_stack_size
&&
tq
->
pop_local
(
obj
))
{
obj
->
copy_contents
(
this
);
}
}
// If we could not find any other work, flush the prefetch queue
if
(
claimed_stack_breadth
()
->
size
()
==
0
&&
(
overflow_stack_breadth
()
->
length
()
==
0
))
{
if
(
tq
->
is_empty
())
{
flush_prefetch_queue
();
}
}
while
((
totally_drain
&&
claimed_stack_breadth
()
->
size
()
>
0
)
||
(
overflow_stack_breadth
()
->
length
()
>
0
));
assert
(
!
totally_drain
||
claimed_stack_empty
(),
"Sanity"
);
assert
(
totally_drain
||
claimed_stack_breadth
()
->
size
()
<=
_target_stack_size
,
"Sanity"
);
assert
(
overflow_stack_empty
(),
"Sanity"
);
}
while
(
totally_drain
&&
!
tq
->
taskqueue_empty
()
||
!
tq
->
overflow_empty
());
assert
(
!
totally_drain
||
tq
->
taskqueue_empty
(),
"Sanity"
);
assert
(
totally_drain
||
tq
->
size
()
<=
_target_stack_size
,
"Sanity"
);
assert
(
tq
->
overflow_empty
(),
"Sanity"
);
}
void
PSPromotionManager
::
flush_labs
()
{
assert
(
claimed_stack_empty
(),
"Attempt to flush lab with live stack"
);
assert
(
overflow_stack_empty
(),
"Attempt to flush lab with live overflow stack"
);
assert
(
stacks_empty
(),
"Attempt to flush lab with live stack"
);
// If either promotion lab fills up, we can flush the
// lab but not refill it, so check first.
...
...
src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2002, 20
08
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -78,9 +78,7 @@ class PSPromotionManager : public CHeapObj {
PrefetchQueue
_prefetch_queue
;
OopStarTaskQueue
_claimed_stack_depth
;
GrowableArray
<
StarTask
>*
_overflow_stack_depth
;
OopTaskQueue
_claimed_stack_breadth
;
GrowableArray
<
oop
>*
_overflow_stack_breadth
;
OverflowTaskQueue
<
oop
>
_claimed_stack_breadth
;
bool
_depth_first
;
bool
_totally_drain
;
...
...
@@ -97,9 +95,6 @@ class PSPromotionManager : public CHeapObj {
template
<
class
T
>
inline
void
claim_or_forward_internal_depth
(
T
*
p
);
template
<
class
T
>
inline
void
claim_or_forward_internal_breadth
(
T
*
p
);
GrowableArray
<
StarTask
>*
overflow_stack_depth
()
{
return
_overflow_stack_depth
;
}
GrowableArray
<
oop
>*
overflow_stack_breadth
()
{
return
_overflow_stack_breadth
;
}
// On the task queues we push reference locations as well as
// partially-scanned arrays (in the latter case, we push an oop to
// the from-space image of the array and the length on the
...
...
@@ -151,18 +146,19 @@ class PSPromotionManager : public CHeapObj {
#if PS_PM_STATS
++
_total_pushes
;
int
stack_length
=
claimed_stack_depth
()
->
overflow_stack
()
->
length
();
#endif // PS_PM_STATS
if
(
!
claimed_stack_depth
()
->
push
(
p
))
{
overflow_stack_depth
()
->
push
(
p
);
claimed_stack_depth
()
->
push
(
p
);
#if PS_PM_STATS
if
(
claimed_stack_depth
()
->
overflow_stack
()
->
length
()
!=
stack_length
)
{
++
_overflow_pushes
;
uint
stack_length
=
(
uint
)
overflow_stack_depth
()
->
length
();
if
(
stack_length
>
_max_overflow_length
)
{
_max_overflow_length
=
stack_length
;
if
((
uint
)
stack_length
+
1
>
_max_overflow_length
)
{
_max_overflow_length
=
(
uint
)
stack_length
+
1
;
}
#endif // PS_PM_STATS
}
#endif // PS_PM_STATS
}
void
push_breadth
(
oop
o
)
{
...
...
@@ -170,18 +166,19 @@ class PSPromotionManager : public CHeapObj {
#if PS_PM_STATS
++
_total_pushes
;
int
stack_length
=
claimed_stack_breadth
()
->
overflow_stack
()
->
length
();
#endif // PS_PM_STATS
if
(
!
claimed_stack_breadth
()
->
push
(
o
))
{
overflow_stack_breadth
()
->
push
(
o
);
claimed_stack_breadth
()
->
push
(
o
);
#if PS_PM_STATS
if
(
claimed_stack_breadth
()
->
overflow_stack
()
->
length
()
!=
stack_length
)
{
++
_overflow_pushes
;
uint
stack_length
=
(
uint
)
overflow_stack_breadth
()
->
length
();
if
(
stack_length
>
_max_overflow_length
)
{
_max_overflow_length
=
stack_length
;
if
((
uint
)
stack_length
+
1
>
_max_overflow_length
)
{
_max_overflow_length
=
(
uint
)
stack_length
+
1
;
}
#endif // PS_PM_STATS
}
#endif // PS_PM_STATS
}
protected:
...
...
@@ -199,12 +196,10 @@ class PSPromotionManager : public CHeapObj {
static
PSPromotionManager
*
vm_thread_promotion_manager
();
static
bool
steal_depth
(
int
queue_num
,
int
*
seed
,
StarTask
&
t
)
{
assert
(
stack_array_depth
()
!=
NULL
,
"invariant"
);
return
stack_array_depth
()
->
steal
(
queue_num
,
seed
,
t
);
}
static
bool
steal_breadth
(
int
queue_num
,
int
*
seed
,
Task
&
t
)
{
assert
(
stack_array_breadth
()
!=
NULL
,
"invariant"
);
static
bool
steal_breadth
(
int
queue_num
,
int
*
seed
,
oop
&
t
)
{
return
stack_array_breadth
()
->
steal
(
queue_num
,
seed
,
t
);
}
...
...
@@ -214,7 +209,7 @@ class PSPromotionManager : public CHeapObj {
OopStarTaskQueue
*
claimed_stack_depth
()
{
return
&
_claimed_stack_depth
;
}
O
opTaskQueue
*
claimed_stack_breadth
()
{
O
verflowTaskQueue
<
oop
>
*
claimed_stack_breadth
()
{
return
&
_claimed_stack_breadth
;
}
...
...
@@ -246,25 +241,13 @@ class PSPromotionManager : public CHeapObj {
void
drain_stacks_depth
(
bool
totally_drain
);
void
drain_stacks_breadth
(
bool
totally_drain
);
bool
claimed_stack_empty
()
{
if
(
depth_first
())
{
return
claimed_stack_depth
()
->
size
()
<=
0
;
}
else
{
return
claimed_stack_breadth
()
->
size
()
<=
0
;
}
}
bool
overflow_stack_empty
()
{
if
(
depth_first
())
{
return
overflow_stack_depth
()
->
length
()
<=
0
;
}
else
{
return
overflow_stack_breadth
()
->
length
()
<=
0
;
}
bool
depth_first
()
const
{
return
_depth_first
;
}
bool
stacks_empty
()
{
return
claimed_stack_empty
()
&&
overflow_stack_empty
();
}
bool
depth_first
()
{
return
_depth_first
;
return
depth_first
()
?
claimed_stack_depth
()
->
is_empty
()
:
claimed_stack_breadth
()
->
is_empty
();
}
inline
void
process_popped_location_depth
(
StarTask
p
);
...
...
src/share/vm/gc_implementation/parallelScavenge/psScavenge.cpp
浏览文件 @
cb5b0fb9
...
...
@@ -414,7 +414,6 @@ bool PSScavenge::invoke_no_policy() {
}
// Finally, flush the promotion_manager's labs, and deallocate its stacks.
assert
(
promotion_manager
->
claimed_stack_empty
(),
"Sanity"
);
PSPromotionManager
::
post_scavenge
();
promotion_failure_occurred
=
promotion_failed
();
...
...
src/share/vm/utilities/taskqueue.cpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2001, 20
09
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -182,73 +182,3 @@ bool ObjArrayTask::is_valid() const {
_index
<
objArrayOop
(
_obj
)
->
length
();
}
#endif // ASSERT
bool
RegionTaskQueueWithOverflow
::
is_empty
()
{
return
(
_region_queue
.
size
()
==
0
)
&&
(
_overflow_stack
->
length
()
==
0
);
}
bool
RegionTaskQueueWithOverflow
::
stealable_is_empty
()
{
return
_region_queue
.
size
()
==
0
;
}
// True when the private overflow stack holds no saved region tasks.
bool RegionTaskQueueWithOverflow::overflow_is_empty() {
  return 0 == _overflow_stack->length();
}
void
RegionTaskQueueWithOverflow
::
initialize
()
{
_region_queue
.
initialize
();
assert
(
_overflow_stack
==
0
,
"Creating memory leak"
);
_overflow_stack
=
new
(
ResourceObj
::
C_HEAP
)
GrowableArray
<
RegionTask
>
(
10
,
true
);
}
// Enqueue t, preferring the stealable queue so other workers can take
// it; spill to the overflow stack when the fixed-size queue is full.
void RegionTaskQueueWithOverflow::save(RegionTask t) {
  if (TraceRegionTasksQueuing && Verbose) {
    gclog_or_tty->print_cr("CTQ: save " PTR_FORMAT, t);
  }
  const bool queued = _region_queue.push(t);
  if (!queued) {
    _overflow_stack->push(t);
  }
}
// Note that using this method will retrieve all regions
// that have been saved but that it will always check
// the overflow stack. It may be more efficient to
// check the stealable queue and the overflow stack
// separately.
//
// Returns true and sets region_task on success; returns false when
// both the overflow stack and the stealable queue are empty.
bool RegionTaskQueueWithOverflow::retrieve(RegionTask& region_task) {
  // Drain the overflow stack first; it is private to this worker.
  bool result = retrieve_from_overflow(region_task);
  if (!result) {
    result = retrieve_from_stealable_queue(region_task);
  }
  if (TraceRegionTasksQueuing && Verbose && result) {
    // Fix: log the retrieved task itself; previously the bool 'result'
    // was passed to PTR_FORMAT, printing 0/1 instead of the task.
    gclog_or_tty->print_cr(" CTQ: retrieve " PTR_FORMAT, region_task);
  }
  return result;
}
// Pop from the local end of the stealable queue into region_task.
// Returns false when that queue is empty.
bool RegionTaskQueueWithOverflow::retrieve_from_stealable_queue(RegionTask& region_task) {
  const bool popped = _region_queue.pop_local(region_task);
  if (TraceRegionTasksQueuing && Verbose) {
    // NOTE(review): traced even when the pop failed, in which case
    // region_task was not written by pop_local — confirm intent.
    gclog_or_tty->print_cr("CTQ: retrieve_stealable " PTR_FORMAT, region_task);
  }
  return popped;
}
// Pop the most recently saved task from the overflow stack into
// region_task.  Returns false (and NULLs region_task) when the
// overflow stack is empty.
bool RegionTaskQueueWithOverflow::retrieve_from_overflow(RegionTask& region_task) {
  bool result;
  if (!_overflow_stack->is_empty()) {
    region_task = _overflow_stack->pop();
    result = true;
  } else {
    region_task = (RegionTask) NULL;
    result = false;
  }
  if (TraceRegionTasksQueuing && Verbose) {
    // Fix: this is the overflow path; the message was copy-pasted from
    // retrieve_from_stealable_queue and wrongly said "retrieve_stealable".
    gclog_or_tty->print_cr("CTQ: retrieve_overflow " PTR_FORMAT, region_task);
  }
  return result;
}
src/share/vm/utilities/taskqueue.hpp
浏览文件 @
cb5b0fb9
/*
* Copyright (c) 2001, 20
09
, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 20
10
, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
...
...
@@ -109,8 +109,9 @@ protected:
public:
TaskQueueSuper
()
:
_bottom
(
0
),
_age
()
{}
// Return true if the TaskQueue contains any tasks.
bool
peek
()
{
return
_bottom
!=
_age
.
top
();
}
// Return true if the TaskQueue contains/does not contain any tasks.
bool
peek
()
const
{
return
_bottom
!=
_age
.
top
();
}
bool
is_empty
()
const
{
return
size
()
==
0
;
}
// Return an estimate of the number of elements in the queue.
// The "careful" version admits the possibility of pop_local/pop_global
...
...
@@ -165,18 +166,16 @@ public:
void
initialize
();
// Push the task "t" on the queue. Returns "false" iff the queue is
// full.
// Push the task "t" on the queue. Returns "false" iff the queue is full.
inline
bool
push
(
E
t
);
//
If succeeds in claiming a task (from the 'local' end, that is, the
//
most recently pushed task), returns "true" and sets "t" to that task.
//
Otherwise, the queue is empty and returns false
.
//
Attempts to claim a task from the "local" end of the queue (the most
//
recently pushed). If successful, returns true and sets t to the task;
//
otherwise, returns false (the queue is empty)
.
inline
bool
pop_local
(
E
&
t
);
// If succeeds in claiming a task (from the 'global' end, that is, the
// least recently pushed task), returns "true" and sets "t" to that task.
// Otherwise, the queue is empty and returns false.
// Like pop_local(), but uses the "global" end of the queue (the least
// recently pushed).
bool
pop_global
(
E
&
t
);
// Delete any resource associated with the queue.
...
...
@@ -198,7 +197,6 @@ GenericTaskQueue<E, N>::GenericTaskQueue() {
template
<
class
E
,
unsigned
int
N
>
void
GenericTaskQueue
<
E
,
N
>::
initialize
()
{
_elems
=
NEW_C_HEAP_ARRAY
(
E
,
N
);
guarantee
(
_elems
!=
NULL
,
"Allocation failed."
);
}
template
<
class
E
,
unsigned
int
N
>
...
...
@@ -289,7 +287,87 @@ GenericTaskQueue<E, N>::~GenericTaskQueue() {
FREE_C_HEAP_ARRAY
(
E
,
_elems
);
}
// Inherits the typedef of "Task" from above.
// OverflowTaskQueue is a TaskQueue that also includes an overflow stack for
// elements that do not fit in the TaskQueue.
//
// Three methods from super classes are overridden:
//
// initialize() - initialize the super classes and create the overflow stack
// push() - push onto the task queue or, if that fails, onto the overflow stack
// is_empty() - return true if both the TaskQueue and overflow stack are empty
//
// Note that size() is not overridden--it returns the number of elements in the
// TaskQueue, and does not include the size of the overflow stack.  This
// simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
template<class E, unsigned int N = TASKQUEUE_SIZE>
class OverflowTaskQueue: public GenericTaskQueue<E, N>
{
public:
  typedef GrowableArray<E>       overflow_t;  // growable C-heap spill area
  typedef GenericTaskQueue<E, N> taskqueue_t; // fixed-size stealable queue

  OverflowTaskQueue();
  ~OverflowTaskQueue();
  void initialize();

  // The overflow stack; NULL until initialize() has been called.
  inline overflow_t* overflow_stack() const { return _overflow_stack; }

  // Push task t onto the queue or onto the overflow stack.  Return true.
  inline bool push(E t);

  // Attempt to pop from the overflow stack; return true if anything was popped.
  inline bool pop_overflow(E& t);

  // Emptiness of each part, and of the structure as a whole.
  inline bool taskqueue_empty() const { return taskqueue_t::is_empty(); }
  inline bool overflow_empty()  const { return overflow_stack()->is_empty(); }
  inline bool is_empty()        const {
    return taskqueue_empty() && overflow_empty();
  }

private:
  overflow_t* _overflow_stack; // holds tasks that did not fit in taskqueue_t
};
// Construct with no overflow stack; initialize() allocates it later.
template <class E, unsigned int N>
OverflowTaskQueue<E, N>::OverflowTaskQueue() : _overflow_stack(NULL) { }
// Release the overflow stack, if initialize() ever allocated one.
template <class E, unsigned int N>
OverflowTaskQueue<E, N>::~OverflowTaskQueue()
{
  overflow_t* const stack = _overflow_stack;
  if (stack == NULL) return;
  delete stack;
  _overflow_stack = NULL;
}
// Initialize the underlying task queue and allocate the C-heap-backed
// overflow stack.  Must run exactly once; the assert catches a second
// call that would leak the first stack.
template <class E, unsigned int N>
void OverflowTaskQueue<E, N>::initialize()
{
  taskqueue_t::initialize();
  assert(_overflow_stack == NULL, "memory leak");
  overflow_t* const stack =
    new (ResourceObj::C_HEAP) GrowableArray<E>(10, true);
  _overflow_stack = stack;
}
// Push t onto the fixed-size queue, spilling to the overflow stack
// when the queue is full.  A push therefore always succeeds.
template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::push(E t)
{
  const bool fit_in_queue = taskqueue_t::push(t);
  if (!fit_in_queue) {
    overflow_stack()->push(t);
  }
  return true;
}
// Pop the most recently spilled task into t; returns false (leaving t
// untouched) when nothing has been spilled.
template <class E, unsigned int N>
bool OverflowTaskQueue<E, N>::pop_overflow(E& t)
{
  if (!overflow_empty()) {
    t = overflow_stack()->pop();
    return true;
  }
  return false;
}
class
TaskQueueSetSuper
:
public
CHeapObj
{
protected:
static
int
randomParkAndMiller
(
int
*
seed0
);
...
...
@@ -323,11 +401,11 @@ public:
T
*
queue
(
uint
n
);
// The thread with queue number "queue_num" (and whose random number seed
//
is at "seed") is trying to steal a task from some other queue. (It
//
may try several queues, according to some configuration parameter.)
//
If some steal succeeds, returns "true" and sets "t" the stolen task,
//
otherwise returns
false.
// The thread with queue number "queue_num" (and whose random number seed
is
//
at "seed") is trying to steal a task from some other queue. (It may try
//
several queues, according to some configuration parameter.) If some steal
//
succeeds, returns "true" and sets "t" to the stolen task, otherwise returns
// false.
bool
steal
(
uint
queue_num
,
int
*
seed
,
E
&
t
);
bool
peek
();
...
...
@@ -507,7 +585,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
uint
localBot
=
_bottom
;
// This value cannot be N-1. That can only occur as a result of
// the assignment to bottom in this method. If it does, this method
// resets the size
(
to 0 before the next call (which is sequential,
// resets the size to 0 before the next call (which is sequential,
// since this is pop_local.)
uint
dirty_n_elems
=
dirty_size
(
localBot
,
_age
.
top
());
assert
(
dirty_n_elems
!=
N
-
1
,
"Shouldn't be possible..."
);
...
...
@@ -533,8 +611,7 @@ GenericTaskQueue<E, N>::pop_local(E& t) {
}
}
typedef
oop
Task
;
typedef
GenericTaskQueue
<
Task
>
OopTaskQueue
;
typedef
GenericTaskQueue
<
oop
>
OopTaskQueue
;
typedef
GenericTaskQueueSet
<
OopTaskQueue
>
OopTaskQueueSet
;
#ifdef _MSC_VER
...
...
@@ -615,35 +692,8 @@ private:
#pragma warning(pop)
#endif
typedef
GenericTaskQueue
<
StarTask
>
OopStarTaskQueue
;
typedef
OverflowTaskQueue
<
StarTask
>
OopStarTaskQueue
;
typedef
GenericTaskQueueSet
<
OopStarTaskQueue
>
OopStarTaskQueueSet
;
typedef
size_t
RegionTask
;
// index for region
typedef
GenericTaskQueue
<
RegionTask
>
RegionTaskQueue
;
typedef
GenericTaskQueueSet
<
RegionTaskQueue
>
RegionTaskQueueSet
;
// A work-stealing region-task queue paired with a private overflow
// stack for tasks that do not fit in the fixed-size queue.
class RegionTaskQueueWithOverflow: public CHeapObj {
 protected:
  RegionTaskQueue            _region_queue;   // stealable fixed-size queue
  GrowableArray<RegionTask>* _overflow_stack; // spill area; NULL until initialize()

 public:
  RegionTaskQueueWithOverflow() : _overflow_stack(NULL) {}
  // Initialize both stealable queue and overflow
  void initialize();
  // Save first to stealable queue and then to overflow
  void save(RegionTask t);
  // Retrieve first from overflow and then from stealable queue
  bool retrieve(RegionTask& region_index);
  // Retrieve from stealable queue
  bool retrieve_from_stealable_queue(RegionTask& region_index);
  // Retrieve from overflow
  bool retrieve_from_overflow(RegionTask& region_index);
  // Emptiness of the whole structure and of each part.
  bool is_empty();
  bool stealable_is_empty();
  bool overflow_is_empty();
  // Number of elements in the stealable queue only (excludes overflow).
  uint stealable_size() { return _region_queue.size(); }
  // Direct access to the stealable queue (e.g. for a task-queue set).
  RegionTaskQueue* task_queue() { return &_region_queue; }
};
#define USE_RegionTaskQueueWithOverflow
typedef
OverflowTaskQueue
<
size_t
>
RegionTaskQueue
;
typedef
GenericTaskQueueSet
<
RegionTaskQueue
>
RegionTaskQueueSet
;
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录