openanolis / dragonwell8_hotspot

Commit b76f4992
Merge, authored Nov 19, 2010 by coleenp
Parents: 8c0ce4e7 22d4b2a0

Showing 13 changed files with 157 additions and 104 deletions (+157 -104)
src/share/vm/c1/c1_Compilation.cpp                                               +8   -1
src/share/vm/compiler/compileBroker.cpp                                          +2   -9
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp  +2   -1
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp                            +1   -0
src/share/vm/gc_implementation/g1/g1RemSet.cpp                                   +6   -22
src/share/vm/gc_implementation/g1/g1RemSet.hpp                                   +44  -28
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp                            +71  -34
src/share/vm/includeDB_core                                                      +1   -0
src/share/vm/memory/universe.cpp                                                 +3   -1
src/share/vm/opto/graphKit.cpp                                                   +2   -1
src/share/vm/runtime/arguments.cpp                                               +12  -3
src/share/vm/runtime/init.cpp                                                    +1   -0
src/share/vm/runtime/simpleThresholdPolicy.cpp                                   +4   -4
src/share/vm/c1/c1_Compilation.cpp

@@ -471,7 +471,14 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
   _exception_info_list = new ExceptionInfoList();
   _implicit_exception_table.set_size(0);
   compile_method();
-  if (is_profiling() && _would_profile) {
+  if (bailed_out()) {
+    _env->record_method_not_compilable(bailout_msg(), !TieredCompilation);
+    if (is_profiling()) {
+      // Compilation failed, create MDO, which would signal the interpreter
+      // to start profiling on its own.
+      _method->build_method_data();
+    }
+  } else if (is_profiling() && _would_profile) {
     ciMethodData* md = method->method_data();
     assert(md != NULL, "Should have MDO");
     md->set_would_profile(_would_profile);
src/share/vm/compiler/compileBroker.cpp

@@ -1535,7 +1535,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
       //assert(false, "compiler should always document failure");
       // The compiler elected, without comment, not to register a result.
       // Do not attempt further compilations of this method.
-      ci_env.record_method_not_compilable("compile failed");
+      ci_env.record_method_not_compilable("compile failed", !TieredCompilation);
     }
     if (ci_env.failing()) {
@@ -1544,15 +1544,8 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
     if (PrintCompilation) {
       const char* reason = ci_env.failure_reason();
       if (compilable == ciEnv::MethodCompilable_not_at_tier) {
-        if (is_highest_tier_compile(ci_env.comp_level())) {
-          // Already at highest tier, promote to not compilable.
-          compilable = ciEnv::MethodCompilable_never;
-        } else {
-          tty->print_cr("%3d COMPILE SKIPPED: %s (retry at different tier)", compile_id, reason);
-        }
-      }
-
-      if (compilable == ciEnv::MethodCompilable_never) {
+        tty->print_cr("%3d COMPILE SKIPPED: %s (retry at different tier)", compile_id, reason);
+      } else if (compilable == ciEnv::MethodCompilable_never) {
         tty->print_cr("%3d COMPILE SKIPPED: %s (not retryable)", compile_id, reason);
       } else if (compilable == ciEnv::MethodCompilable) {
         tty->print_cr("%3d COMPILE SKIPPED: %s", compile_id, reason);
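The three skip messages above differ only in their suffix. The following standalone snippet is a plain printf stand-in for HotSpot's tty->print_cr (the compile id and failure reason are made-up values), showing what each format string from the hunk produces:

    // compile_skip_formats.cpp -- illustrative only, not HotSpot source
    #include <cstdio>

    int main() {
      int compile_id = 42;                     // hypothetical compile id
      const char* reason = "too many nodes";   // hypothetical failure reason
      std::printf("%3d COMPILE SKIPPED: %s (retry at different tier)\n", compile_id, reason);
      std::printf("%3d COMPILE SKIPPED: %s (not retryable)\n", compile_id, reason);
      std::printf("%3d COMPILE SKIPPED: %s\n", compile_id, reason);
      return 0;
    }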
src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp

@@ -1093,8 +1093,9 @@ bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
 // perm_gen_verify_bit_map where we store the "deadness" information if
 // we did not sweep the perm gen in the most recent previous GC cycle.
 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
+  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
+         "Else races are possible");
   assert(block_is_obj(p), "The address should point to an object");
-  assert(SafepointSynchronize::is_at_safepoint(), "Else races are possible");
   // If we're sweeping, we use object liveness information from the main bit map
   // for both perm gen and old gen.
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -795,6 +795,7 @@ public:
     _worker_i(worker_i), _g1h(g1) { }
   bool doHeapRegion(HeapRegion* r) {
     if (!r->continuesHumongous()) {
+      _cl.set_from(r);
src/share/vm/gc_implementation/g1/g1RemSet.cpp

@@ -116,7 +116,6 @@ G1RemSet::G1RemSet(G1CollectedHeap* g1, CardTableModRefBS* ct_bs)
   : _g1(g1), _conc_refine_cards(0),
     _ct_bs(ct_bs), _g1p(_g1->g1_policy()),
     _cg1r(g1->concurrent_g1_refine()),
-    _traversal_in_progress(false),
     _cset_rs_update_cl(NULL),
     _cards_scanned(NULL), _total_cards_scanned(0)
 {
@@ -512,8 +511,6 @@ void G1RemSet::prepare_for_oops_into_collection_set_do() {
   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
   dcqs.concatenate_logs();
-  assert(!_traversal_in_progress, "Invariant between iterations.");
-  set_traversal(true);
   if (ParallelGCThreads > 0) {
     _seq_task->set_n_threads((int)n_workers());
   }
@@ -539,9 +536,6 @@ class cleanUpIteratorsClosure : public HeapRegionClosure {
 // through the oops which coincide with that card. It scans the reference
 // fields in each oop; when it finds an oop that points into the collection
 // set, the RSet for the region containing the referenced object is updated.
-// Note: _par_traversal_in_progress in the G1RemSet must be FALSE; otherwise
-// the UpdateRSetImmediate closure will cause cards to be enqueued on to
-// the DCQS that we're iterating over, causing an infinite loop.
 class UpdateRSetCardTableEntryIntoCSetClosure: public CardTableEntryClosure {
   G1CollectedHeap* _g1;
   CardTableModRefBS* _ct_bs;
@@ -611,8 +605,6 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   // Set all cards back to clean.
   _g1->cleanUpCardTable();
-  set_traversal(false);
-
   DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set();
   int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
@@ -645,21 +637,8 @@ void G1RemSet::cleanup_after_oops_into_collection_set_do() {
   assert(_g1->into_cset_dirty_card_queue_set().completed_buffers_num() == 0,
          "all buffers should be freed");
   _g1->into_cset_dirty_card_queue_set().clear_n_completed_buffers();
-
-  assert(!_traversal_in_progress, "Invariant between iterations.");
 }
-
-class UpdateRSObjectClosure: public ObjectClosure {
-  UpdateRSOopClosure* _update_rs_oop_cl;
-public:
-  UpdateRSObjectClosure(UpdateRSOopClosure* update_rs_oop_cl) :
-    _update_rs_oop_cl(update_rs_oop_cl) {}
-  void do_object(oop obj) {
-    obj->oop_iterate(_update_rs_oop_cl);
-  }
-};
-
 class ScrubRSClosure: public HeapRegionClosure {
   G1CollectedHeap* _g1h;
   BitMap* _region_bm;
@@ -749,7 +728,12 @@ bool G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
   ct_freq_note_card(_ct_bs->index_for(start));
 #endif
-  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
+  assert(!check_for_refs_into_cset || _cset_rs_update_cl[worker_i] != NULL, "sanity");
+  UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
+                                               _g1->g1_rem_set(),
+                                               _cset_rs_update_cl[worker_i],
+                                               check_for_refs_into_cset,
+                                               worker_i);
   update_rs_oop_cl.set_from(r);
   TriggerClosure trigger_cl;
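Both the old and the new code cache one "into-CSet" closure per GC worker in the pre-existing _cset_rs_update_cl array; what changes is that the new UpdateRSOrPushRefOopClosure receives its worker's cached closure explicitly instead of consulting the now-removed _traversal_in_progress flag. A minimal sketch of the per-worker-slot idiom, with hypothetical names and plain C++ in place of HotSpot's allocation machinery:

    // per_worker_cache.cpp -- sketch of the per-worker closure cache, illustrative only
    #include <cstddef>

    struct ScanClosure;  // stands in for OopsInHeapRegionClosure

    class PerWorkerCache {
      ScanClosure** _slots;   // one slot per GC worker; NULL outside a pause
      size_t        _n;
    public:
      explicit PerWorkerCache(size_t n_workers)
          : _slots(new ScanClosure*[n_workers]()), _n(n_workers) {}
      ~PerWorkerCache() { delete[] _slots; }
      void set(size_t worker_i, ScanClosure* cl) { _slots[worker_i] = cl; }
      ScanClosure* get(size_t worker_i) const { return _slots[worker_i]; }
    };

    int main() {
      PerWorkerCache cache(4);
      return cache.get(0) == 0 ? 0 : 1;   // slots start out NULL
    }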
src/share/vm/gc_implementation/g1/g1RemSet.hpp

@@ -59,11 +59,6 @@ protected:
   size_t* _cards_scanned;
   size_t  _total_cards_scanned;

-  // _traversal_in_progress is "true" iff a traversal is in progress.
-  bool _traversal_in_progress;
-  void set_traversal(bool b) { _traversal_in_progress = b; }
-
   // Used for caching the closure that is responsible for scanning
   // references into the collection set.
   OopsInHeapRegionClosure** _cset_rs_update_cl;
@@ -76,10 +71,6 @@ protected:
   bool concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i,
                                     bool check_for_refs_into_cset);

-protected:
-  template <class T> void write_ref_nv(HeapRegion* from, T* p);
-  template <class T> void par_write_ref_nv(HeapRegion* from, T* p, int tid);
-
 public:
   // This is called to reset dual hash tables after the gc pause
   // is finished and the initial hash table is no longer being
@@ -117,22 +108,8 @@ public:
   // Record, if necessary, the fact that *p (where "p" is in region "from",
   // which is required to be non-NULL) has changed to a new non-NULL value.
-  // [Below the virtual version calls a non-virtual protected
-  // workhorse that is templatified for narrow vs wide oop.]
-  inline void write_ref(HeapRegion* from, oop* p) {
-    write_ref_nv(from, p);
-  }
-  inline void write_ref(HeapRegion* from, narrowOop* p) {
-    write_ref_nv(from, p);
-  }
-  inline void par_write_ref(HeapRegion* from, oop* p, int tid) {
-    par_write_ref_nv(from, p, tid);
-  }
-  inline void par_write_ref(HeapRegion* from, narrowOop* p, int tid) {
-    par_write_ref_nv(from, p, tid);
-  }
-
-  bool self_forwarded(oop obj);
+  template <class T> void write_ref(HeapRegion* from, T* p);
+  template <class T> void par_write_ref(HeapRegion* from, T* p, int tid);

   // Requires "region_bm" and "card_bm" to be bitmaps with 1 bit per region
   // or card, respectively, such that a region or card with a corresponding
@@ -186,9 +163,8 @@ class UpdateRSOopClosure: public OopClosure {
 public:
   UpdateRSOopClosure(G1RemSet* rs, int worker_i = 0) :
-    _from(NULL), _rs(rs), _worker_i(worker_i) {
-    guarantee(_rs != NULL, "Requires an HRIntoG1RemSet");
-  }
+    _from(NULL), _rs(rs), _worker_i(worker_i) {}

   void set_from(HeapRegion* from) {
     assert(from != NULL, "from region must be non-NULL");
@@ -215,3 +191,43 @@ public:
   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   virtual void do_oop(oop* p)       { do_oop_work(p); }
 };
+
+class UpdateRSOrPushRefOopClosure: public OopClosure {
+  G1CollectedHeap* _g1;
+  G1RemSet* _g1_rem_set;
+  HeapRegion* _from;
+  OopsInHeapRegionClosure* _push_ref_cl;
+  bool _record_refs_into_cset;
+  int _worker_i;
+
+  template <class T> void do_oop_work(T* p);
+
+public:
+  UpdateRSOrPushRefOopClosure(G1CollectedHeap* g1h,
+                              G1RemSet* rs,
+                              OopsInHeapRegionClosure* push_ref_cl,
+                              bool record_refs_into_cset,
+                              int worker_i = 0) :
+    _g1(g1h),
+    _g1_rem_set(rs),
+    _from(NULL),
+    _record_refs_into_cset(record_refs_into_cset),
+    _push_ref_cl(push_ref_cl),
+    _worker_i(worker_i) { }
+
+  void set_from(HeapRegion* from) {
+    assert(from != NULL, "from region must be non-NULL");
+    _from = from;
+  }
+
+  bool self_forwarded(oop obj) {
+    bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
+    return result;
+  }
+
+  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
+  virtual void do_oop(oop* p)       { do_oop_work(p); }
+
+  bool apply_to_weak_ref_discovered_field() { return true; }
+};
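UpdateRSOrPushRefOopClosure follows the usual HotSpot closure shape: two virtual do_oop() overloads forward to a single templated do_oop_work(), so compressed (narrowOop) and full-width oops share one implementation. This is also why the patch can drop the separate write_ref_nv/par_write_ref_nv "workhorse" pair: once write_ref and par_write_ref are templates themselves, the extra indirection has no job left. A minimal, compilable sketch of the idiom with stand-in types rather than the real HotSpot classes:

    // closure_idiom.cpp -- sketch with hypothetical types, not HotSpot source
    #include <cstdio>

    typedef unsigned int narrowOop;  // stand-in for a compressed reference
    typedef void*        oopSlot;    // stand-in for a full-width reference slot

    class OopClosureSketch {
      // One template body serves both slot widths.
      template <class T> void do_oop_work(T* p) {
        std::printf("visiting a %zu-byte slot at %p\n", sizeof(T), (void*)p);
      }
    public:
      virtual void do_oop(narrowOop* p) { do_oop_work(p); }
      virtual void do_oop(oopSlot* p)   { do_oop_work(p); }
      virtual ~OopClosureSketch() {}
    };

    int main() {
      narrowOop n = 0; oopSlot o = 0;
      OopClosureSketch cl;
      cl.do_oop(&n);   // dispatches to do_oop_work<narrowOop>
      cl.do_oop(&o);   // dispatches to do_oop_work<oopSlot>
      return 0;
    }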
src/share/vm/gc_implementation/g1/g1RemSet.inline.hpp

@@ -31,17 +31,12 @@ inline size_t G1RemSet::n_workers() {
 }

 template <class T>
-inline void G1RemSet::write_ref_nv(HeapRegion* from, T* p) {
-  par_write_ref_nv(from, p, 0);
-}
-
-inline bool G1RemSet::self_forwarded(oop obj) {
-  bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
-  return result;
+inline void G1RemSet::write_ref(HeapRegion* from, T* p) {
+  par_write_ref(from, p, 0);
 }

 template <class T>
-inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
+inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, int tid) {
   oop obj = oopDesc::load_decode_heap_oop(p);
 #ifdef ASSERT
   // can't do because of races
@@ -62,34 +57,15 @@ inline void G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
   assert(from == NULL || from->is_in_reserved(p), "p is not in from");

   HeapRegion* to = _g1->heap_region_containing(obj);
-  // The test below could be optimized by applying a bit op to to and from.
-  if (to != NULL && from != NULL && from != to) {
-    // The _traversal_in_progress flag is true during the collection pause,
-    // false during the evacuation failure handling. This should avoid a
-    // potential loop if we were to add the card containing 'p' to the DCQS
-    // that's used to regenerate the remembered sets for the collection set,
-    // in the event of an evacuation failure, here. The UpdateRSImmediate
-    // closure will eventally call this routine.
-    if (_traversal_in_progress &&
-        to->in_collection_set() && !self_forwarded(obj)) {
-      assert(_cset_rs_update_cl[tid] != NULL, "should have been set already");
-      _cset_rs_update_cl[tid]->do_oop(p);
-
-      // Deferred updates to the CSet are either discarded (in the normal case),
-      // or processed (if an evacuation failure occurs) at the end
-      // of the collection.
-      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
-    } else {
+  if (to != NULL && from != to) {
 #if G1_REM_SET_LOGGING
-      gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
-                             " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
-                             p, obj, to->bottom(), to->end());
+    gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
+                           " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
+                           p, obj, to->bottom(), to->end());
 #endif
-      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
-      to->rem_set()->add_reference(p, tid);
-    }
+    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
+    to->rem_set()->add_reference(p, tid);
   }
 }
@@ -108,3 +84,64 @@ inline void UpdateRSetImmediate::do_oop_work(T* p) {
   }
 }
+
+template <class T>
+inline void UpdateRSOrPushRefOopClosure::do_oop_work(T* p) {
+  oop obj = oopDesc::load_decode_heap_oop(p);
+#ifdef ASSERT
+  // can't do because of races
+  // assert(obj == NULL || obj->is_oop(), "expected an oop");
+
+  // Do the safe subset of is_oop
+  if (obj != NULL) {
+#ifdef CHECK_UNHANDLED_OOPS
+    oopDesc* o = obj.obj();
+#else
+    oopDesc* o = obj;
+#endif // CHECK_UNHANDLED_OOPS
+    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
+    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
+  }
+#endif // ASSERT
+
+  assert(_from != NULL, "from region must be non-NULL");
+
+  HeapRegion* to = _g1->heap_region_containing(obj);
+  if (to != NULL && _from != to) {
+    // The _record_refs_into_cset flag is true during the RSet
+    // updating part of an evacuation pause. It is false at all
+    // other times:
+    //  * rebuilding the rembered sets after a full GC
+    //  * during concurrent refinement.
+    //  * updating the remembered sets of regions in the collection
+    //    set in the event of an evacuation failure (when deferred
+    //    updates are enabled).
+    if (_record_refs_into_cset && to->in_collection_set()) {
+      // We are recording references that point into the collection
+      // set and this particular reference does exactly that...
+      // If the referenced object has already been forwarded
+      // to itself, we are handling an evacuation failure and
+      // we have already visited/tried to copy this object
+      // there is no need to retry.
+      if (!self_forwarded(obj)) {
+        assert(_push_ref_cl != NULL, "should not be null");
+        // Push the reference in the refs queue of the G1ParScanThreadState
+        // instance for this worker thread.
+        _push_ref_cl->do_oop(p);
+      }
+
+      // Deferred updates to the CSet are either discarded (in the normal case),
+      // or processed (if an evacuation failure occurs) at the end
+      // of the collection.
+      // See G1RemSet::cleanup_after_oops_into_collection_set_do().
+    } else {
+      // We either don't care about pushing references that point into the
+      // collection set (i.e. we're not during an evacuation pause) _or_
+      // the reference doesn't point into the collection set. Either way
+      // we add the reference directly to the RSet of the region containing
+      // the referenced object.
+      _g1_rem_set->par_write_ref(_from, p, _worker_i);
+    }
+  }
+}
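The branching in the new do_oop_work() reduces to a small decision table. A compilable sketch of just that control flow, with hypothetical names rather than HotSpot source:

    // rset_decision.cpp -- decision-table sketch of do_oop_work, illustrative only
    enum Action { PushToScanQueue, Drop, AddToRSet };

    // record_refs_into_cset: are we in the RSet-updating part of an evacuation pause?
    // to_in_cset:            does the reference point into the collection set?
    // self_forwarded:        did the target object fail to evacuate (forwarded to itself)?
    Action classify(bool record_refs_into_cset, bool to_in_cset, bool self_forwarded) {
      if (record_refs_into_cset && to_in_cset) {
        // Refs into the collection set go to the worker's scan queue,
        // unless the target already failed to evacuate and was visited.
        return self_forwarded ? Drop : PushToScanQueue;
      }
      // Concurrent refinement, post-full-GC rebuilds, or refs outside the
      // collection set: update the target region's remembered set directly.
      return AddToRSet;
    }

    int main() {
      return classify(true, true, false) == PushToScanQueue ? 0 : 1;
    }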
src/share/vm/includeDB_core

@@ -4454,6 +4454,7 @@ universe.cpp genRemSet.hpp
 universe.cpp                            generation.hpp
 universe.cpp                            handles.inline.hpp
 universe.cpp                            hashtable.inline.hpp
+universe.cpp                            init.hpp
 universe.cpp                            instanceKlass.hpp
 universe.cpp                            instanceKlassKlass.hpp
 universe.cpp                            instanceRefKlass.hpp
src/share/vm/memory/universe.cpp

@@ -864,7 +864,8 @@ jint Universe::initialize_heap() {
     // compressed oops for pstack code.
     if (PrintCompressedOopsMode) {
      tty->cr();
-      tty->print("heap address: " PTR_FORMAT, Universe::heap()->base());
+      tty->print("heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
+                 Universe::heap()->base(), Universe::heap()->reserved_region().byte_size()/M);
     }
     if ((uint64_t)Universe::heap()->reserved_region().end() > OopEncodingHeapMax) {
       // Can't reserve heap below 32Gb.
@@ -945,6 +946,7 @@ void universe2_init() {
 extern void initialize_converter_functions();

 bool universe_post_init() {
+  assert(!is_init_completed(), "Error: initialization not yet completed!");
   Universe::_fully_initialized = true;
   EXCEPTION_MARK;
   { ResourceMark rm;
src/share/vm/opto/graphKit.cpp

@@ -569,7 +569,8 @@ void GraphKit::builtin_throw(Deoptimization::DeoptReason reason, Node* arg) {
       const TypePtr* adr_typ = ex_con->add_offset(offset);

       Node *adr = basic_plus_adr(ex_node, ex_node, offset);
-      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), ex_con, T_OBJECT);
+      const TypeOopPtr* val_type = TypeOopPtr::make_from_klass(env()->String_klass());
+      Node *store = store_oop_to_object(control(), ex_node, adr, adr_typ, null(), val_type, T_OBJECT);

       add_exception_state(make_exception_state(ex_node));
       return;
src/share/vm/runtime/arguments.cpp

@@ -1341,8 +1341,11 @@ bool verify_object_alignment() {
 }

 inline uintx max_heap_for_compressed_oops() {
-  // Heap should be above HeapBaseMinAddress to get zero based compressed oops.
-  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size() - HeapBaseMinAddress);
+  // Avoid sign flip.
+  if (OopEncodingHeapMax < MaxPermSize + os::vm_page_size()) {
+    return 0;
+  }
+  LP64_ONLY(return OopEncodingHeapMax - MaxPermSize - os::vm_page_size());
   NOT_LP64(ShouldNotReachHere(); return 0);
 }
@@ -1520,7 +1523,13 @@ void Arguments::set_heap_size() {
     }
     if (UseCompressedOops) {
       // Limit the heap size to the maximum possible when using compressed oops
-      reasonable_max = MIN2(reasonable_max, (julong)max_heap_for_compressed_oops());
+      julong max_coop_heap = (julong)max_heap_for_compressed_oops();
+      if (HeapBaseMinAddress + MaxHeapSize < max_coop_heap) {
+        // Heap should be above HeapBaseMinAddress to get zero based compressed oops
+        // but it should be not less than default MaxHeapSize.
+        max_coop_heap -= HeapBaseMinAddress;
+      }
+      reasonable_max = MIN2(reasonable_max, max_coop_heap);
     }
     reasonable_max = os::allocatable_physical_memory(reasonable_max);
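The "Avoid sign flip" guard matters because max_heap_for_compressed_oops() does unsigned arithmetic: if MaxPermSize plus a page exceeds OopEncodingHeapMax (32 GB with the default 8-byte object alignment, per the "Can't reserve heap below 32Gb" comment above), the unguarded subtraction wraps around to an enormous bogus limit instead of zero. A standalone illustration with assumed example values:

    // sign_flip.cpp -- worked example of the guard, illustrative values only
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t oop_encoding_heap_max = 32ull << 30;  // 32 GB
      const uint64_t max_perm_size         = 33ull << 30;  // pathological -XX:MaxPermSize
      const uint64_t page_size             = 4096;

      // Unguarded: unsigned subtraction wraps around near 2^64.
      uint64_t wrapped = oop_encoding_heap_max - max_perm_size - page_size;
      // Guarded, as in the patch: detect the underflow and return 0 instead.
      uint64_t guarded = (oop_encoding_heap_max < max_perm_size + page_size)
                             ? 0
                             : oop_encoding_heap_max - max_perm_size - page_size;

      std::printf("wrapped = %llu\nguarded = %llu\n",
                  (unsigned long long)wrapped, (unsigned long long)guarded);
      return 0;
    }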
src/share/vm/runtime/init.cpp

@@ -160,5 +160,6 @@ bool is_init_completed() {

 void set_init_completed() {
+  assert(Universe::is_fully_initialized(), "Should have completed initialization");
   _init_completed = true;
 }
src/share/vm/runtime/simpleThresholdPolicy.cpp

@@ -176,11 +176,11 @@ void SimpleThresholdPolicy::compile(methodHandle mh, int bci, CompLevel level, T
   if (level == CompLevel_none) {
     return;
   }
-  // Check if the method can be compiled, if not - try different levels.
+  // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling
+  // in the interpreter and then compile with C2 (the transition function will request that,
+  // see common() ). If the method cannot be compiled with C2 but still can with C1, compile it with
+  // pure C1.
   if (!can_be_compiled(mh, level)) {
-    if (level < CompLevel_full_optimization && can_be_compiled(mh, CompLevel_full_optimization)) {
-      compile(mh, bci, CompLevel_full_optimization, THREAD);
-    }
     if (level == CompLevel_full_optimization && can_be_compiled(mh, CompLevel_simple)) {
       compile(mh, bci, CompLevel_simple, THREAD);
     }
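The rewritten comment describes a fallback rule that can be stated as a tiny function. A sketch under assumed names (the level values here are illustrative stand-ins, not the CompLevel constants from this file):

    // tier_fallback.cpp -- sketch of the fallback rule, hypothetical API
    enum CompLevelSketch { LevelNone, LevelSimpleC1, LevelFullC2 };

    // If C2 rejects the method but C1 accepts it, compile with "pure C1".
    // If C1 rejects it, keep profiling in the interpreter; the transition
    // function will eventually request a C2 compile instead.
    CompLevelSketch fallback(CompLevelSketch requested, bool c1_ok, bool c2_ok) {
      if (requested == LevelFullC2 && !c2_ok && c1_ok) return LevelSimpleC1;
      if (requested != LevelFullC2 && !c1_ok)          return LevelNone;
      return requested;
    }

    int main() {
      return fallback(LevelFullC2, true, false) == LevelSimpleC1 ? 0 : 1;
    }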
登录