openanolis / dragonwell8_hotspot
Commit 5a82c0b0 (Merge)
Authored Dec 06, 2010 by coleenp
Parents: a0598de6, 8bbfe8dd
Showing 10 changed files with 1057 additions and 400 deletions (+1057 -400)
Changed files:
  src/share/vm/gc_implementation/g1/concurrentMark.cpp          +9    -1
  src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp         +569  -196
  src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp         +234  -47
  src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp  +106  -28
  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp       +16   -51
  src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp       +10   -7
  src/share/vm/gc_implementation/g1/vm_operations_g1.cpp        +52   -3
  src/share/vm/gc_implementation/g1/vm_operations_g1.hpp        +31   -39
  src/share/vm/memory/referenceProcessor.cpp                    +14   -25
  src/share/vm/runtime/thread.hpp                               +16   -3
src/share/vm/gc_implementation/g1/concurrentMark.cpp

@@ -1051,6 +1051,7 @@ public:
   void work(int worker_i) {
     assert(Thread::current()->is_ConcurrentGC_thread(),
            "this should only be done by a conc GC thread");
+    ResourceMark rm;

     double start_vtime = os::elapsedVTime();
...
@@ -1888,6 +1889,9 @@ void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
   ReferenceProcessor* rp = g1h->ref_processor();

+  // See the comment in G1CollectedHeap::ref_processing_init()
+  // about how reference processing currently works in G1.
+
   // Process weak references.
   rp->setup_policy(clear_all_soft_refs);
   assert(_markStack.isEmpty(), "mark stack should be empty");
...
@@ -2918,7 +2922,11 @@ public:
   CMOopClosure(G1CollectedHeap* g1h,
                ConcurrentMark* cm,
                CMTask* task)
-    : _g1h(g1h), _cm(cm), _task(task) { }
+    : _g1h(g1h), _cm(cm), _task(task) {
+    _ref_processor = g1h->ref_processor();
+    assert(_ref_processor != NULL, "should not be NULL");
+  }
 };

 void CMTask::setup_for_region(HeapRegion* hr) {
...
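The first hunk above adds a ResourceMark at the top of the worker's work() method, so scratch memory obtained during the phase is released when the method returns. As a rough, stand-alone illustration of that mark/release pattern (a toy arena written for this note, not HotSpot's ResourceArea/ResourceMark):

#include <vector>
#include <cstddef>
#include <cstdio>

// Toy resource arena: allocations are bump-pointer style, and a mark
// restores the high-water level when it goes out of scope -- the pattern
// a ResourceMark at the top of work() gives each GC worker.
struct ToyArena {
  std::vector<char> storage = std::vector<char>(1 << 20);
  size_t top = 0;
  void* alloc(size_t n) { void* p = &storage[top]; top += n; return p; }
};

struct ToyResourceMark {
  ToyArena& arena;
  size_t saved_top;
  explicit ToyResourceMark(ToyArena& a) : arena(a), saved_top(a.top) {}
  ~ToyResourceMark() { arena.top = saved_top; }  // everything since the mark is released
};

int main() {
  ToyArena arena;
  {
    ToyResourceMark rm(arena);   // like "ResourceMark rm;" in work()
    arena.alloc(4096);           // scratch memory used during the phase
    std::printf("used inside the scope: %zu bytes\n", arena.top);
  }
  std::printf("after the mark unwinds: %zu bytes\n", arena.top);  // back to 0
  return 0;
}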
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -58,10 +58,11 @@ size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
 // INVARIANTS/NOTES
 //
 // All allocation activity covered by the G1CollectedHeap interface is
-// serialized by acquiring the HeapLock. This happens in
-// mem_allocate_work, which all such allocation functions call.
-// (Note that this does not apply to TLAB allocation, which is not part
-// of this interface: it is done by clients of this interface.)
+// serialized by acquiring the HeapLock. This happens in mem_allocate
+// and allocate_new_tlab, which are the "entry" points to the
+// allocation code from the rest of the JVM. (Note that this does not
+// apply to TLAB allocation, which is not part of this interface: it
+// is done by clients of this interface.)

 // Local to this file.
...
@@ -536,15 +537,17 @@ HeapRegion* G1CollectedHeap::newAllocRegionWithExpansion(int purpose,
 // If could fit into free regions w/o expansion, try.
 // Otherwise, if can expand, do so.
 // Otherwise, if using ex regions might help, try with ex given back.
-HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
+HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size) {
+  assert_heap_locked_or_at_safepoint();
   assert(regions_accounted_for(), "Region leakage!");

-  // We can't allocate H regions while cleanupComplete is running, since
-  // some of the regions we find to be empty might not yet be added to the
-  // unclean list. (If we're already at a safepoint, this call is
-  // unnecessary, not to mention wrong.)
-  if (!SafepointSynchronize::is_at_safepoint())
+  // We can't allocate humongous regions while cleanupComplete is
+  // running, since some of the regions we find to be empty might not
+  // yet be added to the unclean list. If we're already at a
+  // safepoint, this call is unnecessary, not to mention wrong.
+  if (!SafepointSynchronize::is_at_safepoint()) {
     wait_for_cleanup_complete();
+  }

   size_t num_regions =
     round_to(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords;
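The last two context lines of the hunk above show how many contiguous regions a humongous request occupies: the size is rounded up to the region grain and divided by it. A small stand-alone sketch of that arithmetic; the 1M-word grain below is an assumed value for illustration, not G1's actual HeapRegion::GrainWords:

#include <cstddef>
#include <cstdio>

// Round 'size' up to the next multiple of 'grain' (grain is a power of two),
// mirroring the round_to(word_size, GrainWords) / GrainWords idiom above.
static size_t round_to(size_t size, size_t grain) {
  return (size + grain - 1) & ~(grain - 1);
}

int main() {
  const size_t grain_words = 1024 * 1024;        // assumed region size in words
  const size_t word_size   = 2*1024*1024 + 17;   // a humongous request
  size_t num_regions = round_to(word_size, grain_words) / grain_words;
  std::printf("a %zu-word object needs %zu contiguous regions\n",
              word_size, num_regions);           // prints 3
  return 0;
}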
...
@@ -598,106 +601,391 @@ HeapWord* G1CollectedHeap::humongousObjAllocate(size_t word_size) {
   return res;
 }

-HeapWord*
-G1CollectedHeap::attempt_allocation_slow(size_t word_size,
-                                         bool permit_collection_pause) {
-  HeapWord* res = NULL;
-  HeapRegion* allocated_young_region = NULL;
-
-  assert( SafepointSynchronize::is_at_safepoint() ||
-          Heap_lock->owned_by_self(), "pre condition of the call" );
-
-  if (isHumongous(word_size)) {
-    // Allocation of a humongous object can, in a sense, complete a
-    // partial region, if the previous alloc was also humongous, and
-    // caused the test below to succeed.
-    if (permit_collection_pause)
-      do_collection_pause_if_appropriate(word_size);
-    res = humongousObjAllocate(word_size);
-    assert(_cur_alloc_region == NULL ||
-           !_cur_alloc_region->isHumongous(),
-           "Prevent a regression of this bug.");
-
-  } else {
-    // We may have concurrent cleanup working at the time. Wait for it
-    // to complete. In the future we would probably want to make the
-    // concurrent cleanup truly concurrent by decoupling it from the
-    // allocation.
-    if (!SafepointSynchronize::is_at_safepoint())
-      wait_for_cleanup_complete();
-    // If we do a collection pause, this will be reset to a non-NULL
-    // value. If we don't, nulling here ensures that we allocate a new
-    // region below.
-    if (_cur_alloc_region != NULL) {
-      // We're finished with the _cur_alloc_region.
-      // As we're builing (at least the young portion) of the collection
-      // set incrementally we'll add the current allocation region to
-      // the collection set here.
-      if (_cur_alloc_region->is_young()) {
-        g1_policy()->add_region_to_incremental_cset_lhs(_cur_alloc_region);
-      }
-      _summary_bytes_used += _cur_alloc_region->used();
-      _cur_alloc_region = NULL;
-    }
-    assert(_cur_alloc_region == NULL, "Invariant.");
-    // Completion of a heap region is perhaps a good point at which to do
-    // a collection pause.
-    if (permit_collection_pause)
-      do_collection_pause_if_appropriate(word_size);
-    // Make sure we have an allocation region available.
-    if (_cur_alloc_region == NULL) {
-      if (!SafepointSynchronize::is_at_safepoint())
-        wait_for_cleanup_complete();
-      bool next_is_young = should_set_young_locked();
-      // If the next region is not young, make sure it's zero-filled.
-      _cur_alloc_region = newAllocRegion(word_size, !next_is_young);
-      if (_cur_alloc_region != NULL) {
-        _summary_bytes_used -= _cur_alloc_region->used();
-        if (next_is_young) {
-          set_region_short_lived_locked(_cur_alloc_region);
-          allocated_young_region = _cur_alloc_region;
-        }
-      }
-    }
-    assert(_cur_alloc_region == NULL ||
-           !_cur_alloc_region->isHumongous(),
-           "Prevent a regression of this bug.");
-
-    // Now retry the allocation.
-    if (_cur_alloc_region != NULL) {
-      if (allocated_young_region != NULL) {
-        // We need to ensure that the store to top does not
-        // float above the setting of the young type.
-        OrderAccess::storestore();
-      }
-      res = _cur_alloc_region->allocate(word_size);
-    }
-  }
-
-  // NOTE: fails frequently in PRT
-  assert(regions_accounted_for(), "Region leakage!");
-
-  if (res != NULL) {
-    if (!SafepointSynchronize::is_at_safepoint()) {
-      assert( permit_collection_pause, "invariant" );
-      assert( Heap_lock->owned_by_self(), "invariant" );
-      Heap_lock->unlock();
-    }
-
-    if (allocated_young_region != NULL) {
-      HeapRegion* hr = allocated_young_region;
-      HeapWord* bottom = hr->bottom();
-      HeapWord* end = hr->end();
-      MemRegion mr(bottom, end);
-      ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
-    }
-  }
-
-  assert( SafepointSynchronize::is_at_safepoint() ||
-          (res == NULL && Heap_lock->owned_by_self()) ||
-          (res != NULL && !Heap_lock->owned_by_self()),
-          "post condition of the call" );
-
-  return res;
-}
+void
+G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
+  // The cleanup operation might update _summary_bytes_used
+  // concurrently with this method. So, right now, if we don't wait
+  // for it to complete, updates to _summary_bytes_used might get
+  // lost. This will be resolved in the near future when the operation
+  // of the free region list is revamped as part of CR 6977804.
+  wait_for_cleanup_complete();
+
+  retire_cur_alloc_region_common(cur_alloc_region);
+  assert(_cur_alloc_region == NULL, "post-condition");
+}
+
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+HeapWord*
+G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
+                                                       bool at_safepoint,
+                                                       bool do_dirtying) {
+  assert_heap_locked_or_at_safepoint();
+  assert(_cur_alloc_region == NULL,
+         "replace_cur_alloc_region_and_allocate() should only be called "
+         "after retiring the previous current alloc region");
+  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
+         "at_safepoint and is_at_safepoint() should be a tautology");
+
+  if (!g1_policy()->is_young_list_full()) {
+    if (!at_safepoint) {
+      // The cleanup operation might update _summary_bytes_used
+      // concurrently with this method. So, right now, if we don't
+      // wait for it to complete, updates to _summary_bytes_used might
+      // get lost. This will be resolved in the near future when the
+      // operation of the free region list is revamped as part of
+      // CR 6977804. If we're already at a safepoint, this call is
+      // unnecessary, not to mention wrong.
+      wait_for_cleanup_complete();
+    }
+
+    HeapRegion* new_cur_alloc_region = newAllocRegion(word_size,
+                                                      false /* zero_filled */);
+    if (new_cur_alloc_region != NULL) {
+      assert(new_cur_alloc_region->is_empty(),
+             "the newly-allocated region should be empty, "
+             "as right now we only allocate new regions out of the free list");
+      g1_policy()->update_region_num(true /* next_is_young */);
+      _summary_bytes_used -= new_cur_alloc_region->used();
+      set_region_short_lived_locked(new_cur_alloc_region);
+
+      assert(!new_cur_alloc_region->isHumongous(),
+             "Catch a regression of this bug.");
+
+      // We need to ensure that the stores to _cur_alloc_region and,
+      // subsequently, to top do not float above the setting of the
+      // young type.
+      OrderAccess::storestore();
+
+      // Now allocate out of the new current alloc region. We could
+      // have re-used allocate_from_cur_alloc_region() but its
+      // operation is slightly different to what we need here. First,
+      // allocate_from_cur_alloc_region() is only called outside a
+      // safepoint and will always unlock the Heap_lock if it returns
+      // a non-NULL result. Second, it assumes that the current alloc
+      // region is what's already assigned in _cur_alloc_region. What
+      // we want here is to actually do the allocation first before we
+      // assign the new region to _cur_alloc_region. This ordering is
+      // not currently important, but it will be essential when we
+      // change the code to support CAS allocation in the future (see
+      // CR 6994297).
+      //
+      // This allocate method does BOT updates and we don't need them in
+      // the young generation. This will be fixed in the near future by
+      // CR 6994297.
+      HeapWord* result = new_cur_alloc_region->allocate(word_size);
+      assert(result != NULL, "we just allocate out of an empty region "
+             "so allocation should have been successful");
+      assert(is_in(result), "result should be in the heap");
+
+      _cur_alloc_region = new_cur_alloc_region;
+
+      if (!at_safepoint) {
+        Heap_lock->unlock();
+      }
+
+      // do the dirtying, if necessary, after we release the Heap_lock
+      if (do_dirtying) {
+        dirty_young_block(result, word_size);
+      }
+      return result;
+    }
+  }
+
+  assert(_cur_alloc_region == NULL, "we failed to allocate a new current "
+         "alloc region, it should still be NULL");
+  assert_heap_locked_or_at_safepoint();
+  return NULL;
+}
+
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+HeapWord*
+G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation_slow() should not be "
+         "used for humongous allocations");
+
+  // We will loop while succeeded is false, which means that we tried
+  // to do a collection, but the VM op did not succeed. So, when we
+  // exit the loop, either one of the allocation attempts was
+  // successful, or we succeeded in doing the VM op but which was
+  // unable to allocate after the collection.
+  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
+    bool succeeded = true;
+
+    {
+      // We may have concurrent cleanup working at the time. Wait for
+      // it to complete. In the future we would probably want to make
+      // the concurrent cleanup truly concurrent by decoupling it from
+      // the allocation. This will happen in the near future as part
+      // of CR 6977804 which will revamp the operation of the free
+      // region list. The fact that wait_for_cleanup_complete() will
+      // do a wait() means that we'll give up the Heap_lock. So, it's
+      // possible that when we exit wait_for_cleanup_complete() we
+      // might be able to allocate successfully (since somebody else
+      // might have done a collection meanwhile). So, we'll attempt to
+      // allocate again, just in case. When we make cleanup truly
+      // concurrent with allocation, we should remove this allocation
+      // attempt as it's redundant (we only reach here after an
+      // allocation attempt has been unsuccessful).
+      wait_for_cleanup_complete();
+
+      HeapWord* result = attempt_allocation(word_size);
+      if (result != NULL) {
+        assert_heap_not_locked();
+        return result;
+      }
+    }
+
+    if (GC_locker::is_active_and_needs_gc()) {
+      // We are locked out of GC because of the GC locker. Right now,
+      // we'll just stall until the GC locker-induced GC
+      // completes. This will be fixed in the near future by extending
+      // the eden while waiting for the GC locker to schedule the GC
+      // (see CR 6994056).
+
+      // If this thread is not in a jni critical section, we stall
+      // the requestor until the critical section has cleared and
+      // GC allowed. When the critical section clears, a GC is
+      // initiated by the last thread exiting the critical section; so
+      // we retry the allocation sequence from the beginning of the loop,
+      // rather than causing more, now probably unnecessary, GC attempts.
+      JavaThread* jthr = JavaThread::current();
+      assert(jthr != NULL, "sanity");
+      if (!jthr->in_critical()) {
+        MutexUnlocker mul(Heap_lock);
+        GC_locker::stall_until_clear();
+
+        // We'll then fall off the end of the ("if GC locker active")
+        // if-statement and retry the allocation further down in the
+        // loop.
+      } else {
+        if (CheckJNICalls) {
+          fatal("Possible deadlock due to allocating while"
+                " in jni critical section");
+        }
+        return NULL;
+      }
+    } else {
+      // We are not locked out. So, let's try to do a GC. The VM op
+      // will retry the allocation before it completes.
+
+      // Read the GC count while holding the Heap_lock
+      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
+
+      Heap_lock->unlock();
+
+      HeapWord* result =
+        do_collection_pause(word_size, gc_count_before, &succeeded);
+      assert_heap_not_locked();
+      if (result != NULL) {
+        assert(succeeded, "the VM op should have succeeded");
+
+        // Allocations that take place on VM operations do not do any
+        // card dirtying and we have to do it here.
+        dirty_young_block(result, word_size);
+        return result;
+      }
+
+      Heap_lock->lock();
+    }
+
+    assert_heap_locked();
+
+    // We can reach here when we were unsuccessful in doing a GC,
+    // because another thread beat us to it, or because we were locked
+    // out of GC due to the GC locker. In either case a new alloc
+    // region might be available so we will retry the allocation.
+    HeapWord* result = attempt_allocation(word_size);
+    if (result != NULL) {
+      assert_heap_not_locked();
+      return result;
+    }
+
+    // So far our attempts to allocate failed. The only time we'll go
+    // around the loop and try again is if we tried to do a GC and the
+    // VM op that we tried to schedule was not successful because
+    // another thread beat us to it. If that happened it's possible
+    // that by the time we grabbed the Heap_lock again and tried to
+    // allocate other threads filled up the young generation, which
+    // means that the allocation attempt after the GC also failed. So,
+    // it's worth trying to schedule another GC pause.
+    if (succeeded) {
+      break;
+    }
+
+    // Give a warning if we seem to be looping forever.
+    if ((QueuedAllocationWarningCount > 0) &&
+        (try_count % QueuedAllocationWarningCount == 0)) {
+      warning("G1CollectedHeap::attempt_allocation_slow() "
+              "retries %d times", try_count);
+    }
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+HeapWord*
+G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
+                                              bool at_safepoint) {
+  // This is the method that will allocate a humongous object. All
+  // allocation paths that attempt to allocate a humongous object
+  // should eventually reach here. Currently, the only paths are from
+  // mem_allocate() and attempt_allocation_at_safepoint().
+  assert_heap_locked_or_at_safepoint();
+  assert(isHumongous(word_size), "attempt_allocation_humongous() "
+         "should only be used for humongous allocations");
+  assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
+         "at_safepoint and is_at_safepoint() should be a tautology");
+
+  HeapWord* result = NULL;
+
+  // We will loop while succeeded is false, which means that we tried
+  // to do a collection, but the VM op did not succeed. So, when we
+  // exit the loop, either one of the allocation attempts was
+  // successful, or we succeeded in doing the VM op but which was
+  // unable to allocate after the collection.
+  for (int try_count = 1; /* we'll return or break */; try_count += 1) {
+    bool succeeded = true;
+
+    // Given that humongous objects are not allocated in young
+    // regions, we'll first try to do the allocation without doing a
+    // collection hoping that there's enough space in the heap.
+    result = humongous_obj_allocate(word_size);
+    assert(_cur_alloc_region == NULL || !_cur_alloc_region->isHumongous(),
+           "catch a regression of this bug.");
+    if (result != NULL) {
+      if (!at_safepoint) {
+        // If we're not at a safepoint, unlock the Heap_lock.
+        Heap_lock->unlock();
+      }
+      return result;
+    }
+
+    // If we failed to allocate the humongous object, we should try to
+    // do a collection pause (if we're allowed) in case it reclaims
+    // enough space for the allocation to succeed after the pause.
+    if (!at_safepoint) {
+      // Read the GC count while holding the Heap_lock
+      unsigned int gc_count_before = SharedHeap::heap()->total_collections();
+
+      // If we're allowed to do a collection we're not at a
+      // safepoint, so it is safe to unlock the Heap_lock.
+      Heap_lock->unlock();
+
+      result = do_collection_pause(word_size, gc_count_before, &succeeded);
+      assert_heap_not_locked();
+      if (result != NULL) {
+        assert(succeeded, "the VM op should have succeeded");
+        return result;
+      }
+
+      // If we get here, the VM operation either did not succeed
+      // (i.e., another thread beat us to it) or it succeeded but
+      // failed to allocate the object.
+
+      // If we're allowed to do a collection we're not at a
+      // safepoint, so it is safe to lock the Heap_lock.
+      Heap_lock->lock();
+    }
+
+    assert(result == NULL, "otherwise we should have exited the loop earlier");
+
+    // So far our attempts to allocate failed. The only time we'll go
+    // around the loop and try again is if we tried to do a GC and the
+    // VM op that we tried to schedule was not successful because
+    // another thread beat us to it. That way it's possible that some
+    // space was freed up by the thread that successfully scheduled a
+    // GC. So it's worth trying to allocate again.
+    if (succeeded) {
+      break;
+    }
+
+    // Give a warning if we seem to be looping forever.
+    if ((QueuedAllocationWarningCount > 0) &&
+        (try_count % QueuedAllocationWarningCount == 0)) {
+      warning("G1CollectedHeap::attempt_allocation_humongous "
+              "retries %d times", try_count);
+    }
+  }
+
+  assert_heap_locked_or_at_safepoint();
+  return NULL;
+}
+
+HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
+                                           bool expect_null_cur_alloc_region) {
+  assert_at_safepoint();
+  assert(_cur_alloc_region == NULL || !expect_null_cur_alloc_region,
+         err_msg("the current alloc region was unexpectedly found "
+                 "to be non-NULL, cur alloc region: "PTR_FORMAT" "
+                 "expect_null_cur_alloc_region: %d word_size: "SIZE_FORMAT,
+                 _cur_alloc_region, expect_null_cur_alloc_region, word_size));
+
+  if (!isHumongous(word_size)) {
+    if (!expect_null_cur_alloc_region) {
+      HeapRegion* cur_alloc_region = _cur_alloc_region;
+      if (cur_alloc_region != NULL) {
+        // This allocate method does BOT updates and we don't need them in
+        // the young generation. This will be fixed in the near future by
+        // CR 6994297.
+        HeapWord* result = cur_alloc_region->allocate(word_size);
+        if (result != NULL) {
+          assert(is_in(result), "result should be in the heap");
+
+          // We will not do any dirtying here. This is guaranteed to be
+          // called during a safepoint and the thread that scheduled the
+          // pause will do the dirtying if we return a non-NULL result.
+          return result;
+        }
+
+        retire_cur_alloc_region_common(cur_alloc_region);
+      }
+    }
+
+    assert(_cur_alloc_region == NULL,
+           "at this point we should have no cur alloc region");
+    return replace_cur_alloc_region_and_allocate(word_size,
+                                                 true, /* at_safepoint */
+                                                 false /* do_dirtying */);
+  } else {
+    return attempt_allocation_humongous(word_size,
+                                        true /* at_safepoint */);
+  }
+
+  ShouldNotReachHere();
+}
+
+HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "we do not allow TLABs of humongous size");
+
+  Heap_lock->lock();
+
+  // First attempt: try allocating out of the current alloc region or
+  // after replacing the current alloc region.
+  HeapWord* result = attempt_allocation(word_size);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+
+  // Second attempt: go into the even slower path where we might
+  // try to schedule a collection.
+  result = attempt_allocation_slow(word_size);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  Heap_lock->unlock();
+  return NULL;
+}
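The comments in the rewritten slow path describe a locking protocol: the caller enters holding the Heap_lock, a successful allocation releases it and dirties the cards of the new block only afterwards, and a failed attempt schedules a collection through a VM operation and retries. A simplified, stand-alone sketch of that retry loop follows; ToyHeap, try_allocate and schedule_collection are invented stand-ins written for this note, not HotSpot APIs:

#include <mutex>
#include <cstddef>

// Toy heap with a G1-style slow allocation path.
struct ToyHeap {
  std::mutex heap_lock;                                        // stands in for Heap_lock
  void* try_allocate(size_t) { return NULL; }                  // fast attempt (stub)
  bool  schedule_collection(unsigned /*gc_count_before*/) { return true; } // VM-op stand-in
  unsigned total_collections() { return 0; }
  void  dirty_cards(void*, size_t) {}                          // post-allocation card dirtying

  // Caller enters holding heap_lock. Returns with heap_lock released on
  // success (after dirtying cards) and still held on failure, mirroring
  // the protocol described in the comments above.
  void* attempt_allocation_slow(size_t word_size) {
    for (int try_count = 1; ; ++try_count) {
      void* result = try_allocate(word_size);
      if (result != NULL) {
        heap_lock.unlock();             // release before the slower card dirtying
        dirty_cards(result, word_size);
        return result;
      }
      unsigned gc_count_before = total_collections();
      heap_lock.unlock();               // a collection cannot run with the lock held
      bool succeeded = schedule_collection(gc_count_before);
      heap_lock.lock();
      result = try_allocate(word_size); // retry after the (attempted) collection
      if (result != NULL) {
        heap_lock.unlock();
        dirty_cards(result, word_size);
        return result;
      }
      if (succeeded) {
        return NULL;                    // the GC ran and we still failed: give up
      }
      // otherwise another thread beat us to the GC; go around and retry
    }
  }
};

int main() {
  ToyHeap h;
  h.heap_lock.lock();                   // protocol: caller enters holding the lock
  void* p = h.attempt_allocation_slow(128);
  if (p == NULL) h.heap_lock.unlock();  // failure path leaves the lock held
  return 0;
}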
...
@@ -705,46 +993,82 @@ G1CollectedHeap::mem_allocate(size_t word_size,
                               bool   is_noref,
                               bool   is_tlab,
                               bool*  gc_overhead_limit_was_exceeded) {
-  debug_only(check_for_valid_allocation_state());
-  assert(no_gc_in_progress(), "Allocation during gc not allowed");
-  HeapWord* result = NULL;
+  assert_heap_not_locked_and_not_at_safepoint();
+  assert(!is_tlab, "mem_allocate() this should not be called directly "
+         "to allocate TLABs");

   // Loop until the allocation is satisified,
   // or unsatisfied after GC.
-  for (int try_count = 1; /* return or throw */; try_count += 1) {
-    int gc_count_before;
+  for (int try_count = 1; /* we'll return */; try_count += 1) {
+    unsigned int gc_count_before;
     {
       Heap_lock->lock();
-      result = attempt_allocation(word_size);
-      if (result != NULL) {
-        // attempt_allocation should have unlocked the heap lock
-        assert(is_in(result), "result not in heap");
-        return result;
+
+      if (!isHumongous(word_size)) {
+        // First attempt: try allocating out of the current alloc
+        // region or after replacing the current alloc region.
+        HeapWord* result = attempt_allocation(word_size);
+        if (result != NULL) {
+          assert_heap_not_locked();
+          return result;
+        }
+
+        assert_heap_locked();
+
+        // Second attempt: go into the even slower path where we might
+        // try to schedule a collection.
+        result = attempt_allocation_slow(word_size);
+        if (result != NULL) {
+          assert_heap_not_locked();
+          return result;
+        }
+      } else {
+        HeapWord* result = attempt_allocation_humongous(word_size,
+                                                     false /* at_safepoint */);
+        if (result != NULL) {
+          assert_heap_not_locked();
+          return result;
+        }
       }

+      assert_heap_locked();
       // Read the gc count while the heap lock is held.
       gc_count_before = SharedHeap::heap()->total_collections();
+      // We cannot be at a safepoint, so it is safe to unlock the Heap_lock
       Heap_lock->unlock();
     }

     // Create the garbage collection operation...
-    VM_G1CollectForAllocation op(word_size,
-                                 gc_count_before);
+    VM_G1CollectForAllocation op(gc_count_before,
+                                 word_size);
     // ...and get the VM thread to execute it.
     VMThread::execute(&op);
-    if (op.prologue_succeeded()) {
-      result = op.result();
-      assert(result == NULL || is_in(result), "result not in heap");
+
+    assert_heap_not_locked();
+    if (op.prologue_succeeded() && op.pause_succeeded()) {
+      // If the operation was successful we'll return the result even
+      // if it is NULL. If the allocation attempt failed immediately
+      // after a Full GC, it's unlikely we'll be able to allocate now.
+      HeapWord* result = op.result();
+      if (result != NULL && !isHumongous(word_size)) {
+        // Allocations that take place on VM operations do not do any
+        // card dirtying and we have to do it here. We only have to do
+        // this for non-humongous allocations, though.
+        dirty_young_block(result, word_size);
+      }
       return result;
+    } else {
+      assert(op.result() == NULL,
+             "the result should be NULL if the VM op did not succeed");
     }

     // Give a warning if we seem to be looping forever.
     if ((QueuedAllocationWarningCount > 0) &&
         (try_count % QueuedAllocationWarningCount == 0)) {
-      warning("G1CollectedHeap::mem_allocate_work retries %d times",
-              try_count);
+      warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
     }
   }
+
+  ShouldNotReachHere();
 }
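mem_allocate() samples total_collections() while still holding the Heap_lock and hands the value to VM_G1CollectForAllocation; if another thread's GC bumps the count first, the operation's prologue fails and the loop simply retries. A stand-alone sketch of that sampled-count idiom, with invented names rather than the real VM-operation classes:

#include <atomic>
#include <mutex>
#include <cstdio>

// Toy version of the "read the GC count under the lock, then let the
// collection op check it again" idiom used by mem_allocate() above.
struct Collector {
  std::mutex heap_lock;
  std::atomic<unsigned> total_collections{0};

  // Returns false if some other thread already did a GC since
  // 'gc_count_before' was sampled (the prologue check), true otherwise.
  bool request_gc(unsigned gc_count_before) {
    std::lock_guard<std::mutex> guard(heap_lock);
    if (total_collections.load() != gc_count_before) {
      return false;                 // someone beat us to it; the caller just retries
    }
    total_collections.fetch_add(1); // "do" the collection
    return true;
  }
};

int main() {
  Collector c;
  unsigned before;
  {
    std::lock_guard<std::mutex> guard(c.heap_lock);
    before = c.total_collections.load();   // sampled while the lock is held
  }
  std::printf("gc scheduled: %s\n", c.request_gc(before) ? "yes" : "no");
  return 0;
}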
...
@@ -841,11 +1165,11 @@ public:
   }
 };

-void G1CollectedHeap::do_collection(bool explicit_gc,
+bool G1CollectedHeap::do_collection(bool explicit_gc,
                                     bool clear_all_soft_refs,
                                     size_t word_size) {
   if (GC_locker::check_active_before_gc()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+    return false;
   }

   ResourceMark rm;
...
@@ -929,6 +1253,9 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
     g1_policy()->set_full_young_gcs(true);
   }

+  // See the comment in G1CollectedHeap::ref_processing_init() about
+  // how reference processing currently works in G1.
+
   // Temporarily make reference _discovery_ single threaded (non-MT).
   ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
...
@@ -1047,10 +1374,17 @@ void G1CollectedHeap::do_collection(bool explicit_gc,
   if (PrintHeapAtGC) {
     Universe::print_heap_after_gc();
   }
+
+  return true;
 }

 void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
-  do_collection(true,                /* explicit_gc */
-                clear_all_soft_refs,
-                0                    /* word_size */);
+  // do_collection() will return whether it succeeded in performing
+  // the GC. Currently, there is no facility on the
+  // do_full_collection() API to notify the caller than the collection
+  // did not succeed (e.g., because it was locked out by the GC
+  // locker). So, right now, we'll ignore the return value.
+  bool dummy = do_collection(true,                /* explicit_gc */
+                             clear_all_soft_refs,
+                             0                    /* word_size */);
 }
...
@@ -1175,36 +1509,63 @@ resize_if_necessary_after_full_collection(size_t word_size) {

 HeapWord*
-G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
-  HeapWord* result = NULL;
+G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
+                                           bool* succeeded) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "satisfy_failed_allocation() should only be called at a safepoint");
+  assert(Thread::current()->is_VM_thread(),
+         "satisfy_failed_allocation() should only be called by the VM thread");
+
+  *succeeded = true;
+  // Let's attempt the allocation first.
+  HeapWord* result = attempt_allocation_at_safepoint(word_size,
+                                     false /* expect_null_cur_alloc_region */);
+  if (result != NULL) {
+    assert(*succeeded, "sanity");
+    return result;
+  }

   // In a G1 heap, we're supposed to keep allocation from failing by
   // incremental pauses.  Therefore, at least for now, we'll favor
   // expansion over collection.  (This might change in the future if we can
   // do something smarter than full collection to satisfy a failed alloc.)
-
   result = expand_and_allocate(word_size);
   if (result != NULL) {
-    assert(is_in(result), "result not in heap");
+    assert(*succeeded, "sanity");
     return result;
   }

-  // OK, I guess we have to try collection.
-
-  do_collection(false, false, word_size);
-
-  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
-
+  // Expansion didn't work, we'll try to do a Full GC.
+  bool gc_succeeded = do_collection(false, /* explicit_gc */
+                                    false, /* clear_all_soft_refs */
+                                    word_size);
+  if (!gc_succeeded) {
+    *succeeded = false;
+    return NULL;
+  }
+
+  // Retry the allocation
+  result = attempt_allocation_at_safepoint(word_size,
+                                      true /* expect_null_cur_alloc_region */);
   if (result != NULL) {
-    assert(is_in(result), "result not in heap");
+    assert(*succeeded, "sanity");
     return result;
   }

-  // Try collecting soft references.
-  do_collection(false, true, word_size);
-  result = attempt_allocation(word_size, /*permit_collection_pause*/false);
+  // Then, try a Full GC that will collect all soft references.
+  gc_succeeded = do_collection(false, /* explicit_gc */
+                               true,  /* clear_all_soft_refs */
+                               word_size);
+  if (!gc_succeeded) {
+    *succeeded = false;
+    return NULL;
+  }
+
+  // Retry the allocation once more
+  result = attempt_allocation_at_safepoint(word_size,
+                                      true /* expect_null_cur_alloc_region */);
   if (result != NULL) {
-    assert(is_in(result), "result not in heap");
+    assert(*succeeded, "sanity");
     return result;
   }
...
@@ -1215,6 +1576,7 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
   // space available is large enough for the allocation, then a more
   // complete compaction phase than we've tried so far might be
   // appropriate.
+  assert(*succeeded, "sanity");
   return NULL;
 }
...
@@ -1224,14 +1586,20 @@ G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
 // allocated block, or else "NULL".
 HeapWord* G1CollectedHeap::expand_and_allocate(size_t word_size) {
+  assert(SafepointSynchronize::is_at_safepoint(),
+         "expand_and_allocate() should only be called at a safepoint");
+  assert(Thread::current()->is_VM_thread(),
+         "expand_and_allocate() should only be called by the VM thread");
+
   size_t expand_bytes = word_size * HeapWordSize;
   if (expand_bytes < MinHeapDeltaBytes) {
     expand_bytes = MinHeapDeltaBytes;
   }
   expand(expand_bytes);
   assert(regions_accounted_for(), "Region leakage!");
-  HeapWord* result = attempt_allocation(word_size,
-                                        false /* permit_collection_pause */);
-  return result;
+
+  return attempt_allocation_at_safepoint(word_size,
+                                     false /* expect_null_cur_alloc_region */);
 }
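satisfy_failed_allocation() escalates in stages: retry the allocation, expand the heap, do a Full GC, retry, do a Full GC that also clears soft references, and retry once more, reporting failure through the succeeded out-parameter. A toy sketch of that escalation ladder under those assumptions; every helper below is a stub invented for illustration:

#include <cstddef>
#include <cstdio>

// Toy escalation ladder in the spirit of satisfy_failed_allocation().
struct ToySafepointAllocator {
  void* allocate(size_t)            { return NULL; }
  void* expand_and_allocate(size_t) { return NULL; }
  bool  full_gc(bool /*clear_all_soft_refs*/) { return true; }

  void* satisfy_failed_allocation(size_t word_size, bool* succeeded) {
    *succeeded = true;
    void* result = allocate(word_size);
    if (result != NULL) return result;

    result = expand_and_allocate(word_size);          // favor expansion first
    if (result != NULL) return result;

    if (!full_gc(false /* clear_all_soft_refs */)) {  // then a Full GC
      *succeeded = false;
      return NULL;
    }
    result = allocate(word_size);
    if (result != NULL) return result;

    if (!full_gc(true /* clear_all_soft_refs */)) {   // last resort: clear soft refs too
      *succeeded = false;
      return NULL;
    }
    return allocate(word_size);                       // may still be NULL
  }
};

int main() {
  ToySafepointAllocator a;
  bool ok;
  void* p = a.satisfy_failed_allocation(1024, &ok);
  std::printf("result=%p succeeded=%d\n", p, (int)ok);
  return 0;
}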
...
@@ -1649,6 +2017,24 @@ jint G1CollectedHeap::initialize() {
 }

 void G1CollectedHeap::ref_processing_init() {
+  // Reference processing in G1 currently works as follows:
+  //
+  // * There is only one reference processor instance that
+  //   'spans' the entire heap. It is created by the code
+  //   below.
+  // * Reference discovery is not enabled during an incremental
+  //   pause (see 6484982).
+  // * Discoverered refs are not enqueued nor are they processed
+  //   during an incremental pause (see 6484982).
+  // * Reference discovery is enabled at initial marking.
+  // * Reference discovery is disabled and the discovered
+  //   references processed etc during remarking.
+  // * Reference discovery is MT (see below).
+  // * Reference discovery requires a barrier (see below).
+  // * Reference processing is currently not MT (see 6608385).
+  // * A full GC enables (non-MT) reference discovery and
+  //   processes any discovered references.
+
   SharedHeap::ref_processing_init();
   MemRegion mr = reserved_region();
   _ref_processor = ReferenceProcessor::create_ref_processor(
...
@@ -1842,20 +2228,24 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
   unsigned int full_gc_count_before;
   {
     MutexLocker ml(Heap_lock);
-    // Read the GC count while holding the Heap_lock
-    gc_count_before = SharedHeap::heap()->total_collections();
-    full_gc_count_before = SharedHeap::heap()->total_full_collections();
-
-    // Don't want to do a GC until cleanup is completed.
-    wait_for_cleanup_complete();
-
-    // We give up heap lock; VMThread::execute gets it back below
+
+    // Don't want to do a GC until cleanup is completed. This
+    // limitation will be removed in the near future when the
+    // operation of the free region list is revamped as part of
+    // CR 6977804.
+    wait_for_cleanup_complete();
+
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+    full_gc_count_before = SharedHeap::heap()->total_full_collections();
   }

   if (should_do_concurrent_full_gc(cause)) {
     // Schedule an initial-mark evacuation pause that will start a
-    // concurrent cycle.
+    // concurrent cycle. We're setting word_size to 0 which means that
+    // we are not requesting a post-GC allocation.
     VM_G1IncCollectionPause op(gc_count_before,
+                               0,     /* word_size */
                                true,  /* should_initiate_conc_mark */
                                g1_policy()->max_pause_time_ms(),
                                cause);
...
@@ -1864,8 +2254,10 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
     if (cause == GCCause::_gc_locker
         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {

-      // Schedule a standard evacuation pause.
+      // Schedule a standard evacuation pause. We're setting word_size
+      // to 0 which means that we are not requesting a post-GC allocation.
       VM_G1IncCollectionPause op(gc_count_before,
+                                 0,     /* word_size */
                                  false, /* should_initiate_conc_mark */
                                  g1_policy()->max_pause_time_ms(),
                                  cause);
...
@@ -2221,14 +2613,6 @@ size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
   }
 }

-HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
-  assert(!isHumongous(word_size),
-         err_msg("a TLAB should not be of humongous size, "
-                 "word_size = "SIZE_FORMAT, word_size));
-  bool dummy;
-  return G1CollectedHeap::mem_allocate(word_size, false, true, &dummy);
-}
-
 bool G1CollectedHeap::allocs_are_zero_filled() {
   return false;
 }
...
@@ -2633,27 +3017,26 @@ void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
   // always_do_update_barrier = true;
 }

-void G1CollectedHeap::do_collection_pause() {
-  assert(Heap_lock->owned_by_self(), "we assume we'reholding the Heap_lock");
-
-  // Read the GC count while holding the Heap_lock
-  // we need to do this _before_ wait_for_cleanup_complete(), to
-  // ensure that we do not give up the heap lock and potentially
-  // pick up the wrong count
-  unsigned int gc_count_before = SharedHeap::heap()->total_collections();
-
-  // Don't want to do a GC pause while cleanup is being completed!
-  wait_for_cleanup_complete();
-
+HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
+                                               unsigned int gc_count_before,
+                                               bool* succeeded) {
+  assert_heap_not_locked_and_not_at_safepoint();
   g1_policy()->record_stop_world_start();
-  {
-    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1IncCollectionPause op(gc_count_before,
-                               false, /* should_initiate_conc_mark */
-                               g1_policy()->max_pause_time_ms(),
-                               GCCause::_g1_inc_collection_pause);
-    VMThread::execute(&op);
-  }
+  VM_G1IncCollectionPause op(gc_count_before,
+                             word_size,
+                             false, /* should_initiate_conc_mark */
+                             g1_policy()->max_pause_time_ms(),
+                             GCCause::_g1_inc_collection_pause);
+  VMThread::execute(&op);
+
+  HeapWord* result = op.result();
+  bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
+  assert(result == NULL || ret_succeeded,
+         "the result should be NULL if the VM did not succeed");
+  *succeeded = ret_succeeded;
+
+  assert_heap_not_locked();
+  return result;
 }

 void
...
@@ -2797,10 +3180,10 @@ void G1CollectedHeap::reset_taskqueue_stats() {
 }
 #endif // TASKQUEUE_STATS

-void
+bool
 G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   if (GC_locker::check_active_before_gc()) {
-    return; // GC is disabled (e.g. JNI GetXXXCritical operation)
+    return false;
   }

   if (PrintHeapAtGC) {
...
@@ -2871,6 +3254,9 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
   COMPILER2_PRESENT(DerivedPointerTable::clear());

+  // Please see comment in G1CollectedHeap::ref_processing_init()
+  // to see how reference processing currently works in G1.
+
   // We want to turn off ref discovery, if necessary, and turn it back on
   // on again later if we do. XXX Dubious: why is discovery disabled?
   bool was_enabled = ref_processor()->discovery_enabled();
...
@@ -3068,6 +3454,8 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
       (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
     g1_rem_set()->print_summary_info();
   }
+
+  return true;
 }

 size_t G1CollectedHeap::desired_plab_sz(GCAllocPurpose purpose)
...
@@ -3298,6 +3686,7 @@ void G1CollectedHeap::release_gc_alloc_regions(bool totally) {
   // untag the GC alloc regions and tear down the GC alloc region
   // list. It's desirable that no regions are tagged as GC alloc
   // outside GCs.
+
   forget_alloc_region_list();

   // The current alloc regions contain objs that have survived
...
@@ -3361,19 +3750,6 @@ void G1CollectedHeap::finalize_for_evac_failure() {

 // *** Sequential G1 Evacuation

-HeapWord* G1CollectedHeap::allocate_during_gc(GCAllocPurpose purpose,
-                                              size_t word_size) {
-  HeapRegion* alloc_region = _gc_alloc_regions[purpose];
-  // let the caller handle alloc failure
-  if (alloc_region == NULL) return NULL;
-  assert(isHumongous(word_size) || !alloc_region->isHumongous(),
-         "Either the object is humongous or the region isn't");
-  HeapWord* block = alloc_region->allocate(word_size);
-  if (block == NULL) {
-    block = allocate_during_gc_slow(purpose, alloc_region, false, word_size);
-  }
-  return block;
-}
-
 class G1IsAliveClosure: public BoolObjectClosure {
   G1CollectedHeap* _g1;
 public:
...
@@ -4316,6 +4692,10 @@ g1_process_strong_roots(bool collecting_perm_gen,
   }

   // Finish with the ref_processor roots.
   if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
+    // We need to treat the discovered reference lists as roots and
+    // keep entries (which are added by the marking threads) on them
+    // live until they can be processed at the end of marking.
+    ref_processor()->weak_oops_do(scan_non_heap_roots);
     ref_processor()->oops_do(scan_non_heap_roots);
   }
   g1_policy()->record_collection_pause_end_G1_strong_roots();
...
@@ -4381,6 +4761,11 @@ void G1CollectedHeap::evacuate_collection_set() {
   // on individual heap regions when we allocate from
   // them in parallel, so this seems like the correct place for this.
   retire_all_alloc_regions();

+  // Weak root processing.
+  // Note: when JSR 292 is enabled and code blobs can contain
+  // non-perm oops then we will need to process the code blobs
+  // here too.
   {
     G1IsAliveClosure is_alive(this);
     G1KeepAliveClosure keep_alive(this);
...
@@ -4625,12 +5010,6 @@ void G1CollectedHeap::cleanUpCardTable() {
 #endif
 }

-void G1CollectedHeap::do_collection_pause_if_appropriate(size_t word_size) {
-  if (g1_policy()->should_do_collection_pause(word_size)) {
-    do_collection_pause();
-  }
-}
-
 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
   double young_time_ms     = 0.0;
   double non_young_time_ms = 0.0;
...
@@ -4789,6 +5168,7 @@ void G1CollectedHeap::set_unclean_regions_coming_locked(bool b) {
 }

 void G1CollectedHeap::wait_for_cleanup_complete() {
+  assert_not_at_safepoint();
   MutexLockerEx x(Cleanup_mon);
   wait_for_cleanup_complete_locked();
 }
...
@@ -5093,13 +5473,6 @@ size_t G1CollectedHeap::count_free_regions_list() {
   return n + m;
 }

-bool G1CollectedHeap::should_set_young_locked() {
-  assert(heap_lock_held_for_gc(),
-         "the heap lock should already be held by or for this thread");
-  return (g1_policy()->in_young_gc_mode() &&
-          g1_policy()->should_add_next_region_to_young_list());
-}
-
 void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
   assert(heap_lock_held_for_gc(),
          "the heap lock should already be held by or for this thread");
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp

@@ -290,6 +290,63 @@ private:
  // started is maintained in _total_full_collections in CollectedHeap.
  volatile unsigned int _full_collections_completed;

// These are macros so that, if the assert fires, we get the correct
// line number, file, etc.
#define heap_locking_asserts_err_msg(__extra_message) \
err_msg("%s : Heap_lock %slocked, %sat a safepoint", \
(__extra_message), \
(!Heap_lock->owned_by_self()) ? "NOT " : "", \
(!SafepointSynchronize::is_at_safepoint()) ? "NOT " : "")
#define assert_heap_locked() \
do { \
assert(Heap_lock->owned_by_self(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock")); \
} while (0)
#define assert_heap_locked_or_at_safepoint() \
do { \
assert(Heap_lock->owned_by_self() || \
SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock or " \
"should be at a safepoint")); \
} while (0)
#define assert_heap_locked_and_not_at_safepoint() \
do { \
assert(Heap_lock->owned_by_self() && \
!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be holding the Heap_lock and " \
"should not be at a safepoint")); \
} while (0)
#define assert_heap_not_locked() \
do { \
assert(!Heap_lock->owned_by_self(), \
heap_locking_asserts_err_msg("should not be holding the Heap_lock")); \
} while (0)
#define assert_heap_not_locked_and_not_at_safepoint() \
do { \
assert(!Heap_lock->owned_by_self() && \
!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should not be holding the Heap_lock and " \
"should not be at a safepoint")); \
} while (0)
#define assert_at_safepoint() \
do { \
assert(SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should be at a safepoint")); \
} while (0)
#define assert_not_at_safepoint() \
do { \
assert(!SafepointSynchronize::is_at_safepoint(), \
heap_locking_asserts_err_msg("should not be at a safepoint")); \
} while (0)

protected:

  // Returns "true" iff none of the gc alloc regions have any allocations
...
@@ -329,31 +386,162 @@ protected:
...
@@ -329,31 +386,162 @@ protected:
// Attempt to allocate an object of the given (very large) "word_size".
// Attempt to allocate an object of the given (very large) "word_size".
// Returns "NULL" on failure.
// Returns "NULL" on failure.
virtual
HeapWord
*
humongousObjAllocate
(
size_t
word_size
);
virtual
HeapWord
*
humongous_obj_allocate
(
size_t
word_size
);
// If possible, allocate a block of the given word_size, else return "NULL".
// The following two methods, allocate_new_tlab() and
// Returning NULL will trigger GC or heap expansion.
// mem_allocate(), are the two main entry points from the runtime
// These two methods have rather awkward pre- and
// into the G1's allocation routines. They have the following
// post-conditions. If they are called outside a safepoint, then
// assumptions:
// they assume that the caller is holding the heap lock. Upon return
//
// they release the heap lock, if they are returning a non-NULL
// * They should both be called outside safepoints.
// value. attempt_allocation_slow() also dirties the cards of a
//
// newly-allocated young region after it releases the heap
// * They should both be called without holding the Heap_lock.
// lock. This change in interface was the neatest way to achieve
//
// this card dirtying without affecting mem_allocate(), which is a
// * All allocation requests for new TLABs should go to
// more frequently called method. We tried two or three different
// allocate_new_tlab().
// approaches, but they were even more hacky.
//
HeapWord
*
attempt_allocation
(
size_t
word_size
,
// * All non-TLAB allocation requests should go to mem_allocate()
bool
permit_collection_pause
=
true
);
// and mem_allocate() should never be called with is_tlab == true.
//
HeapWord
*
attempt_allocation_slow
(
size_t
word_size
,
// * If the GC locker is active we currently stall until we can
bool
permit_collection_pause
=
true
);
// allocate a new young region. This will be changed in the
// near future (see CR 6994056).
//
// * If either call cannot satisfy the allocation request using the
// current allocating region, they will try to get a new one. If
// this fails, they will attempt to do an evacuation pause and
// retry the allocation.
//
// * If all allocation attempts fail, even after trying to schedule
// an evacuation pause, allocate_new_tlab() will return NULL,
// whereas mem_allocate() will attempt a heap expansion and/or
// schedule a Full GC.
//
// * We do not allow humongous-sized TLABs. So, allocate_new_tlab
// should never be called with word_size being humongous. All
// humongous allocation requests should go to mem_allocate() which
// will satisfy them with a special path.
virtual
HeapWord
*
allocate_new_tlab
(
size_t
word_size
);
virtual
HeapWord
*
mem_allocate
(
size_t
word_size
,
bool
is_noref
,
bool
is_tlab
,
/* expected to be false */
bool
*
gc_overhead_limit_was_exceeded
);
// The following methods, allocate_from_cur_allocation_region(),
// attempt_allocation(), replace_cur_alloc_region_and_allocate(),
// attempt_allocation_slow(), and attempt_allocation_humongous()
// have very awkward pre- and post-conditions with respect to
// locking:
//
// If they are called outside a safepoint they assume the caller
// holds the Heap_lock when it calls them. However, on exit they
// will release the Heap_lock if they return a non-NULL result, but
// keep holding the Heap_lock if they return a NULL result. The
// reason for this is that we need to dirty the cards that span
// allocated blocks on young regions to avoid having to take the
// slow path of the write barrier (for performance reasons we don't
// update RSets for references whose source is a young region, so we
// don't need to look at dirty cards on young regions). But, doing
// this card dirtying while holding the Heap_lock can be a
// scalability bottleneck, especially given that some allocation
// requests might be of non-trivial size (and the larger the region
// size is, the fewer allocations requests will be considered
// humongous, as the humongous size limit is a fraction of the
// region size). So, when one of these calls succeeds in allocating
// a block it does the card dirtying after it releases the Heap_lock
// which is why it will return without holding it.
//
// The above assymetry is the reason why locking / unlocking is done
// explicitly (i.e., with Heap_lock->lock() and
// Heap_lock->unlocked()) instead of using MutexLocker and
// MutexUnlocker objects. The latter would ensure that the lock is
// unlocked / re-locked at every possible exit out of the basic
// block. However, we only want that action to happen in selected
// places.
//
// Further, if the above methods are called during a safepoint, then
// naturally there's no assumption about the Heap_lock being held or
// there's no attempt to unlock it. The parameter at_safepoint
// indicates whether the call is made during a safepoint or not (as
// an optimization, to avoid reading the global flag with
// SafepointSynchronize::is_at_safepoint()).
//
// The methods share these parameters:
//
// * word_size : the size of the allocation request in words
// * at_safepoint : whether the call is done at a safepoint; this
// also determines whether a GC is permitted
// (at_safepoint == false) or not (at_safepoint == true)
// * do_dirtying : whether the method should dirty the allocated
// block before returning
//
// They all return either the address of the block, if they
// successfully manage to allocate it, or NULL.
// It tries to satisfy an allocation request out of the current
// allocating region, which is passed as a parameter. It assumes
// that the caller has checked that the current allocating region is
// not NULL. Given that the caller has to check the current
// allocating region for at least NULL, it might as well pass it as
// the first parameter so that the method doesn't have to read it
// from the _cur_alloc_region field again.
inline HeapWord* allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
                                                size_t word_size);
// It attempts to allocate out of the current alloc region. If that
// fails, it retires the current alloc region (if there is one),
// tries to get a new one and retries the allocation.
inline HeapWord* attempt_allocation(size_t word_size);
// It assumes that the current alloc region has been retired and
// tries to allocate a new one. If it's successful, it performs
// the allocation out of the new current alloc region and updates
// _cur_alloc_region.
HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
                                                bool at_safepoint,
                                                bool do_dirtying);
// The slow path when we are unable to allocate a new current alloc
// region to satisfy an allocation request (i.e., when
// attempt_allocation() fails). It will try to do an evacuation
// pause, which might stall due to the GC locker, and retry the
// allocation attempt when appropriate.
HeapWord* attempt_allocation_slow(size_t word_size);
// The method that tries to satisfy a humongous allocation
// request. If it cannot satisfy it, it will try to do an evacuation
// pause to perhaps reclaim enough space to be able to satisfy the
// allocation request afterwards.
HeapWord* attempt_allocation_humongous(size_t word_size,
                                       bool at_safepoint);
// It does the common work when we are retiring the current alloc region.
inline void retire_cur_alloc_region_common(HeapRegion* cur_alloc_region);
// It retires the current alloc region, which is passed as a
// parameter (since, typically, the caller is already holding on to
// it). It sets _cur_alloc_region to NULL.
void retire_cur_alloc_region(HeapRegion* cur_alloc_region);
// It attempts to do an allocation immediately before or after an
// evacuation pause and can only be called by the VM thread. It has
// slightly different assumptions than the ones before (i.e.,
// assumes that the current alloc region has been retired).
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
                                          bool expect_null_cur_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void dirty_young_block(HeapWord* start, size_t word_size);
// Allocate blocks during garbage collection. Will ensure an
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);

HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
...

@@ -371,11 +559,13 @@ protected:
  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  //   inspection request and should collect the entire heap
- // - if clear_all_soft_refs is true, all soft references are cleared
- //   during the GC
+ // - if clear_all_soft_refs is true, all soft references should be
+ //   cleared during the GC
  // - if explicit_gc is false, word_size describes the allocation that
  //   the GC should attempt (at least) to satisfy
+ // - it returns false if it is unable to do the collection due to the
+ //   GC locker being active, true otherwise
- void do_collection(bool explicit_gc,
+ bool do_collection(bool explicit_gc,
                     bool clear_all_soft_refs,
                     size_t word_size);
...

@@ -391,13 +581,13 @@ protected:

  // Callback from VM_G1CollectForAllocation operation.
  // This function does everything necessary/possible to satisfy a
  // failed allocation request (including collection, expansion, etc.)
- HeapWord* satisfy_failed_allocation(size_t word_size);
+ HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);

  // Attempting to expand the heap sufficiently
  // to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the address of the
  // allocated block, or else "NULL".
- virtual HeapWord* expand_and_allocate(size_t word_size);
+ HeapWord* expand_and_allocate(size_t word_size);

public:
  // Expand the garbage-first heap by at least the given size (in bytes!).

...

@@ -478,21 +668,27 @@ protected:
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

- // Do an incremental collection: identify a collection set, and evacuate
- // its live objects elsewhere.
- virtual void do_collection_pause();
+ // Schedule the VM operation that will do an evacuation pause to
+ // satisfy an allocation request of word_size. *succeeded will
+ // return whether the VM operation was successful (it did do an
+ // evacuation pause) or not (another thread beat us to it or the GC
+ // locker was active). Given that we should not be holding the
+ // Heap_lock when we enter this method, we will pass the
+ // gc_count_before (i.e., total_collections()) as a parameter since
+ // it has to be read while holding the Heap_lock. Currently, both
+ // methods that call do_collection_pause() release the Heap_lock
+ // before the call, so it's easy to read gc_count_before just before.
+ HeapWord* do_collection_pause(size_t word_size,
+                               unsigned int gc_count_before,
+                               bool* succeeded);
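As an illustration of the calling convention described in the comment above, a hypothetical caller (not part of the patch; pause_and_retry_example() is an assumed name) would look roughly like this:

// Illustrative only: the calling convention for do_collection_pause().
HeapWord* G1CollectedHeap::pause_and_retry_example(size_t word_size) {
  assert(Heap_lock->owned_by_self(), "caller enters holding the Heap_lock");

  // gc_count_before has to be read while the Heap_lock is still held.
  unsigned int gc_count_before = total_collections();
  Heap_lock->unlock();

  bool succeeded = false;
  HeapWord* result = do_collection_pause(word_size, gc_count_before,
                                         &succeeded);
  if (!succeeded) {
    // Another thread beat us to the pause, or the GC locker was active;
    // the caller would typically re-take the Heap_lock and retry.
    return NULL;
  }
  // A successful pause may still fail to satisfy the allocation request.
  return result;
}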
  // The guts of the incremental collection pause, executed by the vm
- // thread.
- virtual void do_collection_pause_at_safepoint(double target_pause_time_ms);
+ // thread. It returns false if it is unable to do the collection due
+ // to the GC locker being active, true otherwise
+ bool do_collection_pause_at_safepoint(double target_pause_time_ms);

  // Actually do the work of evacuating the collection set.
- virtual void evacuate_collection_set();
+ void evacuate_collection_set();

- // If this is an appropriate right time, do a collection pause.
- // The "word_size" argument, if non-zero, indicates the size of an
- // allocation request that is prompting this query.
- void do_collection_pause_if_appropriate(size_t word_size);

  // The g1 remembered set of the heap.
  G1RemSet* _g1_rem_set;
...

@@ -762,11 +958,6 @@ public:

#endif // PRODUCT

- // These virtual functions do the actual allocation.
- virtual HeapWord* mem_allocate(size_t word_size,
-                                bool   is_noref,
-                                bool   is_tlab,
-                                bool*  gc_overhead_limit_was_exceeded);

  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region.)
...

@@ -1046,7 +1237,6 @@ public:

  virtual bool supports_tlab_allocation() const;
  virtual size_t tlab_capacity(Thread* thr) const;
  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const;
- virtual HeapWord* allocate_new_tlab(size_t word_size);

  // Can a compiler initialize a new object without store barriers?
  // This permission only extends from the creation of a new object
...

@@ -1186,7 +1376,6 @@ public:

  static G1CollectedHeap* heap();

  void empty_young_list();
- bool should_set_young_locked();

  void set_region_short_lived_locked(HeapRegion* hr);
  // add appropriate methods for any other surv rate groups

...

@@ -1339,8 +1528,6 @@ public:
protected:
  size_t _max_heap_capacity;

- // debug_only(static void check_for_valid_allocation_state();)

public:
  // Temporary: call to mark things unimplemented for the G1 heap (e.g.,
  // MemoryService). In productization, we can make this assert false
...
src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

@@ -27,6 +27,7 @@
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "utilities/taskqueue.hpp"
#include "utilities/taskqueue.hpp"
...
@@ -58,37 +59,114 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
...
@@ -58,37 +59,114 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  return r != NULL && r->in_collection_set();
}

- inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
-                                                      bool permit_collection_pause) {
-   HeapWord* res = NULL;
-
-   assert( SafepointSynchronize::is_at_safepoint() ||
-           Heap_lock->owned_by_self(), "pre-condition of the call" );
-
-   // All humongous allocation requests should go through the slow path in
-   // attempt_allocation_slow().
-   if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
-     // If this allocation causes a region to become non empty,
-     // then we need to update our free_regions count.
-     if (_cur_alloc_region->is_empty()) {
-       res = _cur_alloc_region->allocate(word_size);
-       if (res != NULL)
-         _free_regions--;
-     } else {
-       res = _cur_alloc_region->allocate(word_size);
-     }
-
-     if (res != NULL) {
-       if (!SafepointSynchronize::is_at_safepoint()) {
-         assert( Heap_lock->owned_by_self(), "invariant" );
-         Heap_lock->unlock();
-       }
-       return res;
-     }
-   }
-   // attempt_allocation_slow will also unlock the heap lock when appropriate.
-   return attempt_allocation_slow(word_size, permit_collection_pause);
- }

+ // See the comment in the .hpp file about the locking protocol and
+ // assumptions of this method (and other related ones).
+ inline HeapWord*
+ G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
+                                                 size_t word_size) {
+   assert_heap_locked_and_not_at_safepoint();
+   assert(cur_alloc_region != NULL, "pre-condition of the method");
+   assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
+   assert(cur_alloc_region->is_young(),
+          "we only support young current alloc regions");
+   assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
+          "should not be used for humongous allocations");
+   assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
+   assert(!cur_alloc_region->is_empty(),
+          err_msg("region [" PTR_FORMAT "," PTR_FORMAT "] should not be empty",
+                  cur_alloc_region->bottom(), cur_alloc_region->end()));
+   // This allocate method does BOT updates and we don't need them in
+   // the young generation. This will be fixed in the near future by
+   // CR 6994297.
+   HeapWord* result = cur_alloc_region->allocate(word_size);
+   if (result != NULL) {
+     assert(is_in(result), "result should be in the heap");
+     Heap_lock->unlock();
+
+     // Do the dirtying after we release the Heap_lock.
+     dirty_young_block(result, word_size);
+     return result;
+   }
+
+   assert_heap_locked();
+   return NULL;
+ }
+
+ // See the comment in the .hpp file about the locking protocol and
+ // assumptions of this method (and other related ones).
+ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size) {
+   assert_heap_locked_and_not_at_safepoint();
+   assert(!isHumongous(word_size), "attempt_allocation() should not be called "
+          "for humongous allocation requests");
+
+   HeapRegion* cur_alloc_region = _cur_alloc_region;
+   if (cur_alloc_region != NULL) {
+     HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
+                                                       word_size);
+     if (result != NULL) {
+       assert_heap_not_locked();
+       return result;
+     }
+
+     assert_heap_locked();
+
+     // Since we couldn't successfully allocate into it, retire the
+     // current alloc region.
+     retire_cur_alloc_region(cur_alloc_region);
+   }
+
+   // Try to get a new region and allocate out of it
+   HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
+                                                            false, /* at safepoint */
+                                                            true  /* do_dirtying */);
+   if (result != NULL) {
+     assert_heap_not_locked();
+     return result;
+   }
+
+   assert_heap_locked();
+   return NULL;
+ }
+
+ inline void
+ G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
+   assert_heap_locked_or_at_safepoint();
+   assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
+          "pre-condition of the call");
+   assert(cur_alloc_region->is_young(),
+          "we only support young current alloc regions");
+
+   // The region is guaranteed to be young
+   g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
+   _summary_bytes_used += cur_alloc_region->used();
+   _cur_alloc_region = NULL;
+ }
+
+ // It dirties the cards that cover the block so that the post
+ // write barrier never queues anything when updating objects on this
+ // block. It is assumed (and in fact we assert) that the block
+ // belongs to a young region.
+ inline void
+ G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
+   assert_heap_not_locked();
+
+   // Assign the containing region to containing_hr so that we don't
+   // have to keep calling heap_region_containing_raw() in the
+   // asserts below.
+   DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
+   assert(containing_hr != NULL && start != NULL && word_size > 0,
+          "pre-condition");
+   assert(containing_hr->is_in(start), "it should contain start");
+   assert(containing_hr->is_young(), "it should be young");
+   assert(!containing_hr->isHumongous(), "it should not be humongous");
+
+   HeapWord* end = start + word_size;
+   assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
+
+   MemRegion mr(start, end);
+   ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
+ }

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {

...
src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

@@ -458,8 +458,8 @@ void G1CollectorPolicy::calculate_young_list_min_length() {
    double now_sec = os::elapsedTime();
    double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
    double alloc_rate_ms = predict_alloc_rate_ms();
-   int min_regions = (int) ceil(alloc_rate_ms * when_ms);
-   int current_region_num = (int) _g1->young_list()->length();
+   size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
+   size_t current_region_num = _g1->young_list()->length();
    _young_list_min_length = min_regions + current_region_num;
  }
}

...

@@ -473,9 +473,12 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
      _young_list_target_length = _young_list_fixed_length;
    else
      _young_list_target_length = _young_list_fixed_length / 2;
-   _young_list_target_length = MAX2(_young_list_target_length, (size_t)1);
  }

+ // Make sure we allow the application to allocate at least one
+ // region before we need to do a collection again.
+ size_t min_length = _g1->young_list()->length() + 1;
+ _young_list_target_length = MAX2(_young_list_target_length, min_length);

  calculate_survivors_policy();
}
...

@@ -568,7 +571,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {

    // we should have at least one region in the target young length
    _young_list_target_length =
-       MAX2((size_t) 1, final_young_length + _recorded_survivor_regions);
+       final_young_length + _recorded_survivor_regions;

    // let's keep an eye of how long we spend on this calculation
    // right now, I assume that we'll print it when we need it; we

...

@@ -617,8 +620,7 @@ void G1CollectorPolicy::calculate_young_list_target_length(size_t rs_lengths) {

                  _young_list_min_length);
#endif // TRACE_CALC_YOUNG_LENGTH
    // we'll do the pause as soon as possible by choosing the minimum
-   _young_list_target_length =
-       MAX2(_young_list_min_length, (size_t) 1);
+   _young_list_target_length = _young_list_min_length;
  }

  _rs_lengths_prediction = rs_lengths;

...

@@ -801,7 +803,7 @@ void G1CollectorPolicy::record_full_collection_end() {
  _survivor_surv_rate_group->reset();
  calculate_young_list_min_length();
  calculate_young_list_target_length();
}

void G1CollectorPolicy::record_before_bytes(size_t bytes) {
  _bytes_in_to_space_before_gc += bytes;

...

@@ -824,9 +826,9 @@ void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
    gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
  }

- assert(_g1->used_regions() == _g1->recalculate_used_regions(),
-        "sanity");
- assert(_g1->used() == _g1->recalculate_used(), "sanity");
+ assert(_g1->used() == _g1->recalculate_used(),
+        err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
+                _g1->used(), _g1->recalculate_used()));

  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _all_stop_world_times_ms->add(s_w_t_ms);

...

@@ -2266,24 +2268,13 @@ void G1CollectorPolicy::print_yg_surv_rate_info() const {
#endif // PRODUCT
}

- bool
- G1CollectorPolicy::should_add_next_region_to_young_list() {
-   assert(in_young_gc_mode(), "should be in young GC mode");
-   bool ret;
-   size_t young_list_length = _g1->young_list()->length();
-   size_t young_list_max_length = _young_list_target_length;
-   if (G1FixedEdenSize) {
-     young_list_max_length -= _max_survivor_regions;
-   }
-   if (young_list_length < young_list_max_length) {
-     ret = true;
+ void
+ G1CollectorPolicy::update_region_num(bool young) {
+   if (young) {
      ++_region_num_young;
    } else {
-     ret = false;
      ++_region_num_tenured;
    }
-   return ret;
  }

#ifndef PRODUCT
...

@@ -2327,32 +2318,6 @@ void G1CollectorPolicy::calculate_survivors_policy()
  }
}

- bool
- G1CollectorPolicy_BestRegionsFirst::should_do_collection_pause(size_t word_size) {
-   assert(_g1->regions_accounted_for(), "Region leakage!");
-   double max_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
-
-   size_t young_list_length = _g1->young_list()->length();
-   size_t young_list_max_length = _young_list_target_length;
-   if (G1FixedEdenSize) {
-     young_list_max_length -= _max_survivor_regions;
-   }
-   bool reached_target_length = young_list_length >= young_list_max_length;
-
-   if (in_young_gc_mode()) {
-     if (reached_target_length) {
-       assert( young_list_length > 0 && _g1->young_list()->length() > 0,
-               "invariant" );
-       return true;
-     }
-   } else {
-     guarantee( false, "should not reach here" );
-   }
-
-   return false;
- }

#ifndef PRODUCT
class HRSortIndexIsOKClosure: public HeapRegionClosure {
  CollectionSetChooser* _chooser;

...
src/share/vm/gc_implementation/g1/g1CollectorPolicy.hpp

@@ -993,11 +993,6 @@ public:
  void record_before_bytes(size_t bytes);
  void record_after_bytes(size_t bytes);

- // Returns "true" if this is a good time to do a collection pause.
- // The "word_size" argument, if non-zero, indicates the size of an
- // allocation request that is prompting this query.
- virtual bool should_do_collection_pause(size_t word_size) = 0;

  // Choose a new collection set. Marks the chosen regions as being
  // "in_collection_set", and links them together. The head and number of
  // the collection set are available via access methods.

...

@@ -1116,7 +1111,16 @@ public:
    // do that for any other surv rate groups
  }

- bool should_add_next_region_to_young_list();
+ bool is_young_list_full() {
+   size_t young_list_length = _g1->young_list()->length();
+   size_t young_list_max_length = _young_list_target_length;
+   if (G1FixedEdenSize) {
+     young_list_max_length -= _max_survivor_regions;
+   }
+   return young_list_length >= young_list_max_length;
+ }
+
+ void update_region_num(bool young);

  bool in_young_gc_mode() {
    return _in_young_gc_mode;

...

@@ -1270,7 +1274,6 @@ public:
    _collectionSetChooser = new CollectionSetChooser();
  }

  void record_collection_pause_end();
- bool should_do_collection_pause(size_t word_size);
  // This is not needed any more, after the CSet choosing code was
  // changed to use the pause prediction work. But let's leave the
  // hook in just in case.

...
src/share/vm/gc_implementation/g1/vm_operations_g1.cpp

@@ -27,13 +27,22 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/interfaceSupport.hpp"
VM_G1CollectForAllocation
::
VM_G1CollectForAllocation
(
unsigned
int
gc_count_before
,
size_t
word_size
)
:
VM_G1OperationWithAllocRequest
(
gc_count_before
,
word_size
)
{
guarantee
(
word_size
>
0
,
"an allocation should always be requested"
);
}
void
VM_G1CollectForAllocation
::
doit
()
{
void
VM_G1CollectForAllocation
::
doit
()
{
JvmtiGCForAllocationMarker
jgcm
;
JvmtiGCForAllocationMarker
jgcm
;
G1CollectedHeap
*
g1h
=
G1CollectedHeap
::
heap
();
G1CollectedHeap
*
g1h
=
G1CollectedHeap
::
heap
();
_res
=
g1h
->
satisfy_failed_allocation
(
_size
);
_result
=
g1h
->
satisfy_failed_allocation
(
_word_size
,
&
_pause_succeeded
);
assert
(
g1h
->
is_in_or_null
(
_res
),
"result not in heap"
);
assert
(
_result
==
NULL
||
_pause_succeeded
,
"if we get back a result, the pause should have succeeded"
);
}
}
void
VM_G1CollectFull
::
doit
()
{
void
VM_G1CollectFull
::
doit
()
{
...
@@ -43,6 +52,25 @@ void VM_G1CollectFull::doit() {
...
@@ -43,6 +52,25 @@ void VM_G1CollectFull::doit() {
  g1h->do_full_collection(false /* clear_all_soft_refs */);
}

+ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
+                                       unsigned int   gc_count_before,
+                                       size_t         word_size,
+                                       bool           should_initiate_conc_mark,
+                                       double         target_pause_time_ms,
+                                       GCCause::Cause gc_cause)
+   : VM_G1OperationWithAllocRequest(gc_count_before, word_size),
+     _should_initiate_conc_mark(should_initiate_conc_mark),
+     _target_pause_time_ms(target_pause_time_ms),
+     _full_collections_completed_before(0) {
+   guarantee(target_pause_time_ms > 0.0,
+             err_msg("target_pause_time_ms = %1.6lf should be positive",
+                     target_pause_time_ms));
+   guarantee(word_size == 0 || gc_cause == GCCause::_g1_inc_collection_pause,
+             "we can only request an allocation if the GC cause is for "
+             "an incremental GC pause");
+   _gc_cause = gc_cause;
+ }
+
void VM_G1IncCollectionPause::doit() {
  JvmtiGCForAllocationMarker jgcm;
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

...

@@ -51,6 +79,18 @@ void VM_G1IncCollectionPause::doit() {
          (_gc_cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)),
         "only a GC locker or a System.gc() induced GC should start a cycle");

+ if (_word_size > 0) {
+   // An allocation has been requested. So, try to do that first.
+   _result = g1h->attempt_allocation_at_safepoint(_word_size,
+                                    false /* expect_null_cur_alloc_region */);
+   if (_result != NULL) {
+     // If we can successfully allocate before we actually do the
+     // pause then we will consider this pause successful.
+     _pause_succeeded = true;
+     return;
+   }
+ }
+
  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read full_collections_completed() here, given

...

@@ -63,7 +103,16 @@ void VM_G1IncCollectionPause::doit() {

    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle();
  }

- g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+ _pause_succeeded = g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
+ if (_pause_succeeded && _word_size > 0) {
+   // An allocation had been requested.
+   _result = g1h->attempt_allocation_at_safepoint(_word_size,
+                                     true /* expect_null_cur_alloc_region */);
+ } else {
+   assert(_result == NULL, "invariant");
+ }
}

void VM_G1IncCollectionPause::doit_epilogue() {

...
src/share/vm/gc_implementation/g1/vm_operations_g1.hpp

@@ -31,19 +31,33 @@
// VM_GC_Operation:
//   - VM_CGC_Operation
//   - VM_G1CollectFull
+ //   - VM_G1OperationWithAllocRequest
//     - VM_G1CollectForAllocation
//     - VM_G1IncCollectionPause
- //   - VM_G1PopRegionCollectionPause

+ class VM_G1OperationWithAllocRequest: public VM_GC_Operation {
+ protected:
+   size_t    _word_size;
+   HeapWord* _result;
+   bool      _pause_succeeded;
+
+ public:
+   VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
+                                  size_t       word_size)
+     : VM_GC_Operation(gc_count_before),
+       _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
+   HeapWord* result() { return _result; }
+   bool pause_succeeded() { return _pause_succeeded; }
+ };
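For illustration, a mutator-side caller would typically drive one of the subclasses of this new base class along the lines below. This is a sketch only, not part of the patch; the surrounding function name and the way target_pause_time_ms is obtained are assumptions.

// Illustrative only: driving a VM_G1OperationWithAllocRequest subclass.
static HeapWord* execute_pause_example(size_t word_size,
                                       unsigned int gc_count_before,
                                       double target_pause_time_ms,
                                       bool* succeeded) {
  VM_G1IncCollectionPause op(gc_count_before,
                             word_size,
                             false /* should_initiate_conc_mark */,
                             target_pause_time_ms,
                             GCCause::_g1_inc_collection_pause);
  VMThread::execute(&op);             // runs doit() at a safepoint
  *succeeded = op.pause_succeeded();  // did the pause actually happen?
  return op.result();                 // non-NULL only if the allocation succeeded
}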
class VM_G1CollectFull: public VM_GC_Operation {
public:
  VM_G1CollectFull(unsigned int gc_count_before,
                   unsigned int full_gc_count_before,
                   GCCause::Cause cause)
    : VM_GC_Operation(gc_count_before, full_gc_count_before) {
    _gc_cause = cause;
  }
- ~VM_G1CollectFull() {}
  virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
  virtual void doit();
  virtual const char* name() const {

...

@@ -51,45 +65,28 @@ class VM_G1CollectFull: public VM_GC_Operation {
  }
};

- class VM_G1CollectForAllocation: public VM_GC_Operation {
-  private:
-   HeapWord* _res;
-   size_t    _size;      // size of object to be allocated
-  public:
-   VM_G1CollectForAllocation(size_t size, int gc_count_before)
-     : VM_GC_Operation(gc_count_before) {
-     _size = size;
-     _res = NULL;
-   }
-   ~VM_G1CollectForAllocation() {}
+ class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
+ public:
+   VM_G1CollectForAllocation(unsigned int gc_count_before,
+                             size_t       word_size);
  virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
  virtual void doit();
  virtual const char* name() const {
    return "garbage-first collection to satisfy allocation";
  }
- HeapWord* result() { return _res; }
};
- class VM_G1IncCollectionPause: public VM_GC_Operation {
+ class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
private:
  bool         _should_initiate_conc_mark;
  double       _target_pause_time_ms;
  unsigned int _full_collections_completed_before;
public:
  VM_G1IncCollectionPause(unsigned int   gc_count_before,
+                         size_t         word_size,
                          bool           should_initiate_conc_mark,
                          double         target_pause_time_ms,
-                         GCCause::Cause cause)
-   : VM_GC_Operation(gc_count_before),
-     _full_collections_completed_before(0),
-     _should_initiate_conc_mark(should_initiate_conc_mark),
-     _target_pause_time_ms(target_pause_time_ms) {
-   guarantee(target_pause_time_ms > 0.0,
-             err_msg("target_pause_time_ms = %1.6lf should be positive",
-                     target_pause_time_ms));
-   _gc_cause = cause;
- }
+                         GCCause::Cause gc_cause);
  virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
  virtual void doit();
  virtual void doit_epilogue();

...

@@ -103,14 +100,9 @@ public:
class VM_CGC_Operation: public VM_Operation {
  VoidClosure* _cl;
  const char* _printGCMessage;
public:
- VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) :
-   _cl(cl),
-   _printGCMessage(printGCMsg) { }
- ~VM_CGC_Operation() {}
+ VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg)
+   : _cl(cl),
+     _printGCMessage(printGCMsg) {}
  virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
  virtual void doit();
  virtual bool doit_prologue();

...
src/share/vm/memory/referenceProcessor.cpp

@@ -770,8 +770,7 @@ void ReferenceProcessor::abandon_partial_discovery() {
  // loop over the lists
  for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
    if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
      gclog_or_tty->print_cr("\nAbandoning %s discovered list",
                             list_name(i));
    }
    abandon_partial_discovered_list(_discoveredSoftRefs[i]);

...

@@ -1059,9 +1058,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
    // During a multi-threaded discovery phase,
    // each thread saves to its "own" list.
    Thread* thr = Thread::current();
-   assert(thr->is_GC_task_thread(),
-          "Dubious cast from Thread* to WorkerThread*?");
-   id = ((WorkerThread*)thr)->id();
+   id = thr->as_Worker_thread()->id();
  } else {
    // single-threaded discovery, we save in round-robin
    // fashion to each of the lists.

...

@@ -1095,8 +1092,7 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
      ShouldNotReachHere();
  }
  if (TraceReferenceGC && PrintGCDetails) {
    gclog_or_tty->print_cr("Thread %d gets list " INTPTR_FORMAT,
                           id, list);
  }
  return list;
}

...

@@ -1135,6 +1131,11 @@ ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
    if (_discovered_list_needs_barrier) {
      _bs->write_ref_field((void*)discovered_addr, current_head);
    }
+   if (TraceReferenceGC) {
+     gclog_or_tty->print_cr("Enqueued reference (mt) (" INTPTR_FORMAT ": %s)",
+                            obj, obj->blueprint()->internal_name());
+   }
  } else {
    // If retest was non NULL, another thread beat us to it:
    // The reference has already been discovered...

...

@@ -1239,8 +1240,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
    // Check assumption that an object is not potentially
    // discovered twice except by concurrent collectors that potentially
    // trace the same Reference object twice.
-   assert(UseConcMarkSweepGC,
-          "Only possible with an incremental-update concurrent collector");
+   assert(UseConcMarkSweepGC || UseG1GC,
+          "Only possible with a concurrent marking collector");
    return true;
  }
}

...

@@ -1293,26 +1294,14 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
  }
  list->set_head(obj);
  list->inc_length(1);
}

- // In the MT discovery case, it is currently possible to see
- // the following message multiple times if several threads
- // discover a reference about the same time. Only one will
- // however have actually added it to the disocvered queue.
- // One could let add_to_discovered_list_mt() return an
- // indication for success in queueing (by 1 thread) or
- // failure (by all other threads), but I decided the extra
- // code was not worth the effort for something that is
- // only used for debugging support.
  if (TraceReferenceGC) {
-   oop referent = java_lang_ref_Reference::referent(obj);
-   if (PrintGCDetails) {
      gclog_or_tty->print_cr("Enqueued reference (" INTPTR_FORMAT ": %s)",
                             obj, obj->blueprint()->internal_name());
-   }
-   assert(referent->is_oop(), "Enqueued a bad referent");
  }
  assert(obj->is_oop(), "Enqueued a bad reference");
+ assert(java_lang_ref_Reference::referent(obj)->is_oop(),
+        "Enqueued a bad referent");
  return true;
}

...
src/share/vm/runtime/thread.hpp

@@ -78,6 +78,8 @@ class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;

+ class WorkerThread;

// Class hierarchy
// - Thread
//   - NamedThread

...

@@ -289,6 +291,10 @@ class Thread: public ThreadShadow {
  virtual bool is_Watcher_thread() const            { return false; }
  virtual bool is_ConcurrentGC_thread() const       { return false; }
  virtual bool is_Named_thread() const              { return false; }
+ virtual bool is_Worker_thread() const             { return false; }
+
+ // Casts
+ virtual WorkerThread* as_Worker_thread() const    { return NULL; }

  virtual char* name() const { return (char*)"Unknown thread"; }

...

@@ -629,6 +635,13 @@ private:
  uint _id;
public:
  WorkerThread() : _id(0) { }
+ virtual bool is_Worker_thread() const { return true; }
+
+ virtual WorkerThread* as_Worker_thread() const {
+   assert(is_Worker_thread(), "Dubious cast to WorkerThread*?");
+   return (WorkerThread*)this;
+ }
  void set_id(uint work_id) { _id = work_id; }
  uint id() const { return _id; }
};
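A short usage sketch (illustrative only, not part of the patch) of the new accessor, mirroring the ReferenceProcessor change above where the raw WorkerThread* cast is replaced by the checked call:

// Illustrative only: using the checked cast instead of a raw C-style cast.
static uint current_worker_id_example() {
  Thread* thr = Thread::current();
  assert(thr->is_Worker_thread(), "only worker threads have an id");
  return thr->as_Worker_thread()->id();
}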
...