Commit 8e833fe9 authored by X xlu

Merge

......
@@ -181,7 +181,7 @@ public:
  void scrub(CardTableModRefBS* ctbs, BitMap* card_bm) {
    HeapWord* hr_bot = hr()->bottom();
-   int hr_first_card_index = ctbs->index_for(hr_bot);
+   size_t hr_first_card_index = ctbs->index_for(hr_bot);
    bm()->set_intersection_at_offset(*card_bm, hr_first_card_index);
#if PRT_COUNT_OCCUPIED
    recount_occupied();
......
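The int-to-size_t widening above matters because index_for() returns a pointer difference into the card table (one byte per 512-byte card), which a signed 32-bit int can no longer hold once heaps approach the terabyte range. A minimal sketch of the arithmetic, with hypothetical names standing in for the card table's fields:

```cpp
#include <cstddef>
#include <cstdint>

// Hypothetical stand-ins for CardTableModRefBS internals; HotSpot keeps
// one card-table byte per 2^card_shift (512) bytes of heap.
const int card_shift = 9;

size_t index_for(uintptr_t addr, uintptr_t heap_base) {
  // A 1 TiB heap yields 2^31 cards: past the range of a signed 32-bit int,
  // but comfortably within size_t.
  return (addr - heap_base) >> card_shift;
}
```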
......
@@ -726,7 +726,7 @@ HeapWord* ParallelCompactData::calc_new_pointer(HeapWord* addr) {
  size_t live_to_left = bitmap->live_words_in_range(search_start, oop(addr));
  result += partial_obj_size + live_to_left;
- assert(result <= addr, "object cannot move to the right");
+ DEBUG_ONLY(PSParallelCompact::check_new_location(addr, result);)
  return result;
}
......
@@ -1472,9 +1472,53 @@ PSParallelCompact::summarize_new_objects(SpaceId id, HeapWord* start)
  assert(result, "should not fail: bad filler object size");
}
+ void
+ PSParallelCompact::provoke_split_fill_survivor(SpaceId id)
+ {
+   if (total_invocations() % (ParallelOldGCSplitInterval * 3) != 0) {
+     return;
+   }
+   MutableSpace* const space = _space_info[id].space();
+   if (space->is_empty()) {
+     HeapWord* b = space->bottom();
+     HeapWord* t = b + space->capacity_in_words() / 2;
+     space->set_top(t);
+     if (ZapUnusedHeapArea) {
+       space->set_top_for_allocations();
+     }
+     size_t obj_len = 8;
+     while (b + obj_len <= t) {
+       CollectedHeap::fill_with_object(b, obj_len);
+       mark_bitmap()->mark_obj(b, obj_len);
+       summary_data().add_obj(b, obj_len);
+       b += obj_len;
+       obj_len = (obj_len & 0x18) + 8; // 8 16 24 32 8 16 24 32 ...
+     }
+     if (b < t) {
+       // The loop didn't completely fill to t (top); adjust top downward.
+       space->set_top(b);
+       if (ZapUnusedHeapArea) {
+         space->set_top_for_allocations();
+       }
+     }
+     HeapWord** nta = _space_info[id].new_top_addr();
+     bool result = summary_data().summarize(_space_info[id].split_info(),
+                                            space->bottom(), space->top(), NULL,
+                                            space->bottom(), space->end(), nta);
+     assert(result, "space must fit into itself");
+   }
+ }
void
PSParallelCompact::provoke_split(bool & max_compaction)
{
+   if (total_invocations() % ParallelOldGCSplitInterval != 0) {
+     return;
+   }
  const size_t region_size = ParallelCompactData::RegionSize;
  ParallelCompactData& sd = summary_data();
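The obj_len update in the loop above cycles the filler sizes without a branch: masking with 0x18 keeps only the 8- and 16- bits, so the sequence runs 8, 16, 24, 32 and wraps back to 8 (32 is 0x20, which the mask clears). A standalone check of that arithmetic, independent of HotSpot:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  size_t obj_len = 8;
  for (int i = 0; i < 8; ++i) {
    std::printf("%zu ", obj_len);       // prints: 8 16 24 32 8 16 24 32
    obj_len = (obj_len & 0x18) + 8;     // 0x18 masks the 8 and 16 bits
  }
  std::printf("\n");
  return 0;
}
```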
......
@@ -1587,6 +1631,12 @@ void PSParallelCompact::summarize_spaces_quick()
    assert(result, "space must fit into itself");
    _space_info[i].set_dense_prefix(space->bottom());
  }
+ #ifndef PRODUCT
+   if (ParallelOldGCSplitALot) {
+     provoke_split_fill_survivor(to_space_id);
+   }
+ #endif // #ifndef PRODUCT
}

void PSParallelCompact::fill_dense_prefix_end(SpaceId id)
......
@@ -1794,9 +1844,7 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
  }
#ifndef PRODUCT
  if (ParallelOldGCSplitALot && old_space_total_live < old_capacity) {
-   if (total_invocations() % ParallelOldGCSplitInterval == 0) {
-     provoke_split(maximum_compaction);
-   }
+   provoke_split(maximum_compaction);
  }
#endif // #ifndef PRODUCT
......
@@ -1828,14 +1876,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                          new_top_addr);
      assert(done, "space must fit into old gen");
-     // XXX - this is necessary because decrement_destination_counts() tests
-     // source_region() to determine if a region will be filled.  Probably
-     // better to pass src_space->new_top() into decrement_destination_counts
-     // and test that instead.
-     //
-     // Clear the source_region field for each region in the space.
-     clear_source_region(space->bottom(), _space_info[id].new_top());
      // Reset the new_top value for the space.
      _space_info[id].set_new_top(space->bottom());
    } else if (live > 0) {
......
@@ -1854,7 +1894,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
      dst_space_id = SpaceId(id);
      dst_space_end = space->end();
      new_top_addr = _space_info[id].new_top_addr();
-     HeapWord* const clear_end = _space_info[id].new_top();
      NOT_PRODUCT(summary_phase_msg(dst_space_id,
                                    space->bottom(), dst_space_end,
                                    SpaceId(id), next_src_addr, space->top());)
......
@@ -1865,13 +1904,6 @@ void PSParallelCompact::summary_phase(ParCompactionManager* cm,
                                          new_top_addr);
      assert(done, "space must fit when compacted into itself");
      assert(*new_top_addr <= space->top(), "usage should not grow");
-     // XXX - this should go away.  See comments above.
-     //
-     // Clear the source_region field in regions at the end of the space that
-     // will not be filled.
-     HeapWord* const clear_beg = _summary_data.region_align_up(*new_top_addr);
-     clear_source_region(clear_beg, clear_end);
    }
  }
......
@@ -3051,19 +3083,34 @@ HeapWord* PSParallelCompact::first_src_addr(HeapWord* const dest_addr,
}
void PSParallelCompact::decrement_destination_counts(ParCompactionManager* cm,
+                                                    SpaceId src_space_id,
                                                     size_t beg_region,
                                                     HeapWord* end_addr)
{
  ParallelCompactData& sd = summary_data();
+ #ifdef ASSERT
+ MutableSpace* const src_space = _space_info[src_space_id].space();
+ HeapWord* const beg_addr = sd.region_to_addr(beg_region);
+ assert(src_space->contains(beg_addr) || beg_addr == src_space->end(),
+        "src_space_id does not match beg_addr");
+ assert(src_space->contains(end_addr) || end_addr == src_space->end(),
+        "src_space_id does not match end_addr");
+ #endif // #ifdef ASSERT
  RegionData* const beg = sd.region(beg_region);
- HeapWord* const end_addr_aligned_up = sd.region_align_up(end_addr);
- RegionData* const end = sd.addr_to_region_ptr(end_addr_aligned_up);
- size_t cur_idx = beg_region;
- for (RegionData* cur = beg; cur < end; ++cur, ++cur_idx) {
+ RegionData* const end = sd.addr_to_region_ptr(sd.region_align_up(end_addr));
+ // Regions up to new_top() are enqueued if they become available.
+ HeapWord* const new_top = _space_info[src_space_id].new_top();
+ RegionData* const enqueue_end =
+   sd.addr_to_region_ptr(sd.region_align_up(new_top));
+ for (RegionData* cur = beg; cur < end; ++cur) {
    assert(cur->data_size() > 0, "region must have live data");
    cur->decrement_destination_count();
-   if (cur_idx <= cur->source_region() && cur->available() && cur->claim()) {
-     cm->save_for_processing(cur_idx);
+   if (cur < enqueue_end && cur->available() && cur->claim()) {
+     cm->save_for_processing(sd.region(cur));
    }
  }
}
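With the extra src_space_id argument, the enqueue test becomes a plain pointer comparison against enqueue_end, and the region index is recomputed from the pointer only when a region is actually pushed. A minimal sketch of that index/pointer duality, assuming a flat backing array of RegionData as the summary data uses (field names here are illustrative):

```cpp
#include <cassert>
#include <cstddef>

struct RegionData { /* per-region bookkeeping elided */ };

struct SummaryData {
  RegionData* _region_data;   // hypothetical flat array of regions
  size_t      _region_count;

  // index -> pointer
  RegionData* region(size_t idx) { return _region_data + idx; }

  // pointer -> index by pointer subtraction (what sd.region(cur) does above)
  size_t region(const RegionData* p) {
    assert(p >= _region_data && p < _region_data + _region_count);
    return static_cast<size_t>(p - _region_data);
  }
};
```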
......
@@ -3178,7 +3225,8 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
  HeapWord* const old_src_addr = closure.source();
  closure.copy_partial_obj();
  if (closure.is_full()) {
-   decrement_destination_counts(cm, src_region_idx, closure.source());
+   decrement_destination_counts(cm, src_space_id, src_region_idx,
+                                closure.source());
    region_ptr->set_deferred_obj_addr(NULL);
    region_ptr->set_completed();
    return;
......
@@ -3187,7 +3235,7 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
  HeapWord* const end_addr = sd.region_align_down(closure.source());
  if (sd.region_align_down(old_src_addr) != end_addr) {
    // The partial object was copied from more than one source region.
-   decrement_destination_counts(cm, src_region_idx, end_addr);
+   decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
    // Move to the next source region, possibly switching spaces as well.  All
    // args except end_addr may be modified.
......
@@ -3227,19 +3275,21 @@ void PSParallelCompact::fill_region(ParCompactionManager* cm, size_t region_idx)
      region_ptr->set_deferred_obj_addr(closure.destination());
      status = closure.copy_until_full(); // copies from closure.source()
-     decrement_destination_counts(cm, src_region_idx, closure.source());
+     decrement_destination_counts(cm, src_space_id, src_region_idx,
+                                  closure.source());
      region_ptr->set_completed();
      return;
    }
    if (status == ParMarkBitMap::full) {
-     decrement_destination_counts(cm, src_region_idx, closure.source());
+     decrement_destination_counts(cm, src_space_id, src_region_idx,
+                                  closure.source());
      region_ptr->set_deferred_obj_addr(NULL);
      region_ptr->set_completed();
      return;
    }
-   decrement_destination_counts(cm, src_region_idx, end_addr);
+   decrement_destination_counts(cm, src_space_id, src_region_idx, end_addr);
    // Move to the next source region, possibly switching spaces as well.  All
    // args except end_addr may be modified.
......
@@ -3318,7 +3368,7 @@ void PSParallelCompact::reset_millis_since_last_gc() {
ParMarkBitMap::IterationStatus MoveAndUpdateClosure::copy_until_full()
{
  if (source() != destination()) {
-   assert(source() > destination(), "must copy to the left");
+   DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words_remaining());
  }
  update_state(words_remaining());
......
@@ -3339,7 +3389,7 @@ void MoveAndUpdateClosure::copy_partial_obj()
  // This test is necessary; if omitted, the pointer updates to a partial object
  // that crosses the dense prefix boundary could be overwritten.
  if (source() != destination()) {
-   assert(source() > destination(), "must copy to the left");
+   DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
  update_state(words);
......
@@ -3364,7 +3414,7 @@ MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
  }
  if (destination() != source()) {
-   assert(destination() < source(), "must copy to the left");
+   DEBUG_ONLY(PSParallelCompact::check_new_location(source(), destination());)
    Copy::aligned_conjoint_words(source(), destination(), words);
  }
......
......
@@ -978,6 +978,10 @@ class PSParallelCompact : AllStatic {
  // Include the new objects in the summary data.
  static void summarize_new_objects(SpaceId id, HeapWord* start);
+ // Add live objects to a survivor space since it's rare that both survivors
+ // are non-empty.
+ static void provoke_split_fill_survivor(SpaceId id);
  // Add live objects and/or choose the dense prefix to provoke splitting.
  static void provoke_split(bool & maximum_compaction);
#endif
......
@@ -1154,8 +1158,10 @@ class PSParallelCompact : AllStatic {
                               HeapWord* end_addr);
  // Decrement the destination count for each non-empty source region in the
- // range [beg_region, region(region_align_up(end_addr))).
+ // range [beg_region, region(region_align_up(end_addr))).  If the destination
+ // count for a region goes to 0 and it needs to be filled, enqueue it.
  static void decrement_destination_counts(ParCompactionManager* cm,
+                                          SpaceId src_space_id,
                                           size_t beg_region,
                                           HeapWord* end_addr);
......
@@ -1230,6 +1236,8 @@ class PSParallelCompact : AllStatic {
#endif // #ifndef PRODUCT
#ifdef ASSERT
+ // Sanity check the new location of a word in the heap.
+ static inline void check_new_location(HeapWord* old_addr, HeapWord* new_addr);
  // Verify that all the regions have been emptied.
  static void verify_complete(SpaceId space_id);
#endif // #ifdef ASSERT
......
@@ -1397,6 +1405,15 @@ inline void PSParallelCompact::adjust_pointer(T* p,
  }
}
+ #ifdef ASSERT
+ inline void
+ PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr)
+ {
+   assert(old_addr >= new_addr || space_id(old_addr) != space_id(new_addr),
+          "must move left or to a different space");
+ }
+ #endif // ASSERT
class MoveAndUpdateClosure: public ParMarkBitMapClosure {
 public:
  inline MoveAndUpdateClosure(ParMarkBitMap* bitmap, ParCompactionManager* cm,
......
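The inline definition above, together with the DEBUG_ONLY call sites earlier in the .cpp hunks, relaxes the old "must copy to the left" asserts: per its message, a word may now move to a higher address provided it lands in a different space. A toy version of the relaxed predicate, with made-up space bounds for illustration:

```cpp
#include <cassert>

// Hypothetical: classify an address by which of two spaces contains it.
int space_id_of(const char* addr, const char* boundary) {
  return addr < boundary ? 0 : 1;
}

void check_new_location(const char* old_addr, const char* new_addr,
                        const char* boundary) {
  // Mirrors the rule above: rightward moves are only legal when the
  // word also changes spaces.
  assert(old_addr >= new_addr ||
         space_id_of(old_addr, boundary) != space_id_of(new_addr, boundary));
}
```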
......
@@ -178,8 +178,7 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words)
  // Set the length first for concurrent GC.
  ((arrayOop)start)->set_length((int)len);
- post_allocation_setup_common(Universe::fillerArrayKlassObj(), start,
-                              words);
+ post_allocation_setup_common(Universe::intArrayKlassObj(), start, words);
  DEBUG_ONLY(zap_filler_array(start, words);)
}
......
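This hunk, together with the Universe hunks below that retire _fillerArrayKlassObj, formats heap filler as an ordinary int[] so every heap walker parses it uniformly. A sketch of the length computation such a filler implies, under assumed LP64 sizes (the constants are illustrative, not HotSpot's exact header layout):

```cpp
#include <cstddef>

const size_t HeapWordSize     = 8;  // assumed: bytes per heap word on LP64
const size_t arrayHeaderWords = 2;  // assumed: mark + klass, length packed in

// A filler spanning 'words' heap words is described as an int[] whose
// element count covers the space left after the array header.
size_t filler_array_length(size_t words) {
  return (words - arrayHeaderWords) * HeapWordSize / sizeof(int);
}
```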
......
@@ -283,7 +283,7 @@ void CardTableModRefBS::resize_covered_region(MemRegion new_region) {
    } else {
      entry = byte_after(old_region.last());
    }
-   assert(index_for(new_region.last()) < (int) _guard_index,
+   assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // This line commented out cleans the newly expanded region and
    // not the aligned up expanded region.
......
......
@@ -428,7 +428,7 @@ public:
  }
  // Mapping from address to card marking array index.
- int index_for(void* p) {
+ size_t index_for(void* p) {
    assert(_whole_heap.contains(p),
           "out of bounds access to card marking array");
    return byte_for(p) - _byte_map;
......
......
@@ -49,7 +49,6 @@ klassOop Universe::_constantPoolKlassObj = NULL;
klassOop Universe::_constantPoolCacheKlassObj = NULL;
klassOop Universe::_compiledICHolderKlassObj = NULL;
klassOop Universe::_systemObjArrayKlassObj = NULL;
- klassOop Universe::_fillerArrayKlassObj = NULL;
oop Universe::_int_mirror = NULL;
oop Universe::_float_mirror = NULL;
oop Universe::_double_mirror = NULL;
......
@@ -127,7 +126,6 @@ void Universe::system_classes_do(void f(klassOop)) {
  f(instanceKlassKlassObj());
  f(constantPoolKlassObj());
  f(systemObjArrayKlassObj());
- f(fillerArrayKlassObj());
}
void Universe::oops_do(OopClosure* f, bool do_all) {
......
@@ -182,7 +180,6 @@ void Universe::oops_do(OopClosure* f, bool do_all) {
  f->do_oop((oop*)&_constantPoolCacheKlassObj);
  f->do_oop((oop*)&_compiledICHolderKlassObj);
  f->do_oop((oop*)&_systemObjArrayKlassObj);
- f->do_oop((oop*)&_fillerArrayKlassObj);
  f->do_oop((oop*)&_the_empty_byte_array);
  f->do_oop((oop*)&_the_empty_short_array);
  f->do_oop((oop*)&_the_empty_int_array);
......
@@ -268,7 +265,6 @@ void Universe::genesis(TRAPS) {
    _compiledICHolderKlassObj = compiledICHolderKlass::create_klass(CHECK);
    _systemObjArrayKlassObj = objArrayKlassKlass::cast(objArrayKlassKlassObj())->allocate_system_objArray_klass(CHECK);
-   _fillerArrayKlassObj = typeArrayKlass::create_klass(T_INT, sizeof(jint), "<filler>", CHECK);
    _the_empty_byte_array = oopFactory::new_permanent_byteArray(0, CHECK);
    _the_empty_short_array = oopFactory::new_permanent_shortArray(0, CHECK);
......
......
@@ -133,7 +133,6 @@ class Universe: AllStatic {
  static klassOop _constantPoolCacheKlassObj;
  static klassOop _compiledICHolderKlassObj;
  static klassOop _systemObjArrayKlassObj;
- static klassOop _fillerArrayKlassObj;
  // Known objects in the VM
......
@@ -266,7 +265,6 @@ class Universe: AllStatic {
  static klassOop constantPoolCacheKlassObj() { return _constantPoolCacheKlassObj; }
  static klassOop compiledICHolderKlassObj()  { return _compiledICHolderKlassObj; }
  static klassOop systemObjArrayKlassObj()    { return _systemObjArrayKlassObj; }
- static klassOop fillerArrayKlassObj()       { return _fillerArrayKlassObj; }
  // Known objects in the VM
  static oop int_mirror() { return check_mirror(_int_mirror); }
......
......
@@ -2192,6 +2192,9 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
  case Op_DecodeN:
    assert(!n->in(1)->is_EncodeP(), "should be optimized out");
+   // DecodeN could be pinned on Sparc where it can't be folded into
+   // an address expression; see the code for Op_CastPP above.
+   assert(n->in(0) == NULL || !Matcher::clone_shift_expressions, "no control except on sparc");
    break;
  case Op_EncodeP: {
......
@@ -1724,6 +1724,13 @@ void PhaseMacroExpand::expand_lock_node(LockNode *lock) {
    if (klass_node == NULL) {
      Node* k_adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());
      klass_node = transform_later( LoadKlassNode::make(_igvn, mem, k_adr, _igvn.type(k_adr)->is_ptr()) );
+ #ifdef _LP64
+     if (UseCompressedOops && klass_node->is_DecodeN()) {
+       assert(klass_node->in(1)->Opcode() == Op_LoadNKlass, "sanity");
+       klass_node->in(1)->init_req(0, ctrl);
+     } else
+ #endif
      klass_node->init_req(0, ctrl);
    }
    Node *proto_node = make_load(ctrl, mem, klass_node, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), TypeX_X, TypeX_X->basic_type());
......
@@ -444,9 +444,9 @@ char* SysClassPath::add_jars_to_path(char* path, const char* directory) {
}
// Parses a memory size specification string.
- static bool atomll(const char *s, jlong* result) {
-   jlong n = 0;
-   int args_read = sscanf(s, os::jlong_format_specifier(), &n);
+ static bool atomull(const char *s, julong* result) {
+   julong n = 0;
+   int args_read = sscanf(s, os::julong_format_specifier(), &n);
  if (args_read != 1) {
    return false;
  }
......
@@ -460,15 +460,20 @@ static bool atomll(const char *s, jlong* result) {
  switch (*s) {
    case 'T': case 't':
      *result = n * G * K;
+     // Check for overflow.
+     if (*result/((julong)G * K) != n) return false;
      return true;
    case 'G': case 'g':
      *result = n * G;
+     if (*result/G != n) return false;
      return true;
    case 'M': case 'm':
      *result = n * M;
+     if (*result/M != n) return false;
      return true;
    case 'K': case 'k':
      *result = n * K;
+     if (*result/K != n) return false;
      return true;
    case '\0':
      *result = n;
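Each suffix case now pairs the multiplication with a divide-back test: if n * unit wrapped modulo 2^64, dividing the product by the unit cannot return n. The idiom in isolation, with made-up inputs:

```cpp
#include <cstdint>
#include <cstdio>

// Same divide-back overflow test as atomull(); valid whenever unit > 0.
bool checked_mul(uint64_t n, uint64_t unit, uint64_t* result) {
  *result = n * unit;            // may wrap modulo 2^64
  return *result / unit == n;    // a wrap makes the division disagree
}

int main() {
  uint64_t r;
  const uint64_t G = 1024ULL * 1024 * 1024;
  std::printf("%d\n", checked_mul(4, G, &r));              // 1: 4g fits
  std::printf("%d\n", checked_mul(UINT64_MAX / 2, G, &r)); // 0: overflow
  return 0;
}
```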
......
@@ -478,10 +483,10 @@ static bool atomll(const char *s, jlong* result) {
  }
}
- Arguments::ArgsRange Arguments::check_memory_size(jlong size, jlong min_size) {
+ Arguments::ArgsRange Arguments::check_memory_size(julong size, julong min_size) {
  if (size < min_size) return arg_too_small;
  // Check that size will fit in a size_t (only relevant on 32-bit)
- if ((julong) size > max_uintx) return arg_too_big;
+ if (size > max_uintx) return arg_too_big;
  return arg_in_range;
}
......
@@ -522,10 +527,10 @@ static bool set_fp_numeric_flag(char* name, char* value, FlagValueOrigin origin)
static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
- jlong v;
+ julong v;
  intx intx_v;
  bool is_neg = false;
- // Check the sign first since atomll() parses only unsigned values.
+ // Check the sign first since atomull() parses only unsigned values.
  if (*value == '-') {
    if (!CommandLineFlags::intxAt(name, &intx_v)) {
      return false;
......
@@ -533,7 +538,7 @@ static bool set_numeric_flag(char* name, char* value, FlagValueOrigin origin) {
    value++;
    is_neg = true;
  }
- if (!atomll(value, &v)) {
+ if (!atomull(value, &v)) {
    return false;
  }
  intx_v = (intx) v;
......
@@ -1677,9 +1682,9 @@ static bool match_option(const JavaVMOption* option, const char** names, const c
}
Arguments::ArgsRange Arguments::parse_memory_size(const char* s,
-                                                 jlong* long_arg,
-                                                 jlong min_size) {
- if (!atomll(s, long_arg)) return arg_unreadable;
+                                                 julong* long_arg,
+                                                 julong min_size) {
+ if (!atomull(s, long_arg)) return arg_unreadable;
  return check_memory_size(*long_arg, min_size);
}
......
@@ -1857,7 +1862,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
    // -Xmn for compatibility with other JVM vendors
    } else if (match_option(option, "-Xmn", &tail)) {
-     jlong long_initial_eden_size = 0;
+     julong long_initial_eden_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -1869,7 +1874,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      FLAG_SET_CMDLINE(uintx, NewSize, (size_t) long_initial_eden_size);
    // -Xms
    } else if (match_option(option, "-Xms", &tail)) {
-     jlong long_initial_heap_size = 0;
+     julong long_initial_heap_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_initial_heap_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -1882,7 +1887,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      set_min_heap_size(initial_heap_size());
    // -Xmx
    } else if (match_option(option, "-Xmx", &tail)) {
-     jlong long_max_heap_size = 0;
+     julong long_max_heap_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_max_heap_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -1915,7 +1920,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
      }
    // -Xss
    } else if (match_option(option, "-Xss", &tail)) {
-     jlong long_ThreadStackSize = 0;
+     julong long_ThreadStackSize = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_ThreadStackSize, 1000);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -1931,9 +1936,9 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
    // HotSpot does not have separate native and Java stacks, ignore silently for compatibility
    // -Xmaxjitcodesize
    } else if (match_option(option, "-Xmaxjitcodesize", &tail)) {
-     jlong long_ReservedCodeCacheSize = 0;
+     julong long_ReservedCodeCacheSize = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize,
-                                           InitialCodeCacheSize);
+                                           (size_t)InitialCodeCacheSize);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
                    "Invalid maximum code cache size: %s\n",
......
@@ -2238,7 +2243,7 @@ jint Arguments::parse_each_vm_init_arg(const JavaVMInitArgs* args,
    } else if (match_option(option, "-XX:TLEFragmentationRatio=", &tail)) {
      // No longer used.
    } else if (match_option(option, "-XX:TLESize=", &tail)) {
-     jlong long_tlab_size = 0;
+     julong long_tlab_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &long_tlab_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -2293,7 +2298,7 @@ SOLARIS_ONLY(
                  "-XX:ParCMSPromoteBlocksToClaim in the future\n");
    } else
    if (match_option(option, "-XX:ParallelGCOldGenAllocBufferSize=", &tail)) {
-     jlong old_plab_size = 0;
+     julong old_plab_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &old_plab_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -2301,13 +2306,13 @@ SOLARIS_ONLY(
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
-     FLAG_SET_CMDLINE(uintx, OldPLABSize, (julong)old_plab_size);
+     FLAG_SET_CMDLINE(uintx, OldPLABSize, old_plab_size);
      jio_fprintf(defaultStream::error_stream(),
                  "Please use -XX:OldPLABSize in place of "
                  "-XX:ParallelGCOldGenAllocBufferSize in the future\n");
    } else
    if (match_option(option, "-XX:ParallelGCToSpaceAllocBufferSize=", &tail)) {
-     jlong young_plab_size = 0;
+     julong young_plab_size = 0;
      ArgsRange errcode = parse_memory_size(tail, &young_plab_size, 1);
      if (errcode != arg_in_range) {
        jio_fprintf(defaultStream::error_stream(),
......
@@ -2315,7 +2320,7 @@ SOLARIS_ONLY(
        describe_range_error(errcode);
        return JNI_EINVAL;
      }
-     FLAG_SET_CMDLINE(uintx, YoungPLABSize, (julong)young_plab_size);
+     FLAG_SET_CMDLINE(uintx, YoungPLABSize, young_plab_size);
      jio_fprintf(defaultStream::error_stream(),
                  "Please use -XX:YoungPLABSize in place of "
                  "-XX:ParallelGCToSpaceAllocBufferSize in the future\n");
......
......
@@ -339,9 +339,9 @@ class Arguments : AllStatic {
  }
  static bool verify_percentage(uintx value, const char* name);
  static void describe_range_error(ArgsRange errcode);
- static ArgsRange check_memory_size(jlong size, jlong min_size);
- static ArgsRange parse_memory_size(const char* s, jlong* long_arg,
-                                    jlong min_size);
+ static ArgsRange check_memory_size(julong size, julong min_size);
+ static ArgsRange parse_memory_size(const char* s, julong* long_arg,
+                                    julong min_size);
  // methods to build strings from individual args
  static void build_jvm_args(const char* arg);
......
......
@@ -694,10 +694,10 @@ JVM_ENTRY(jlong, jmm_SetPoolThreshold(JNIEnv* env, jobject obj, jmmThresholdType
               -1);
  }
- if (threshold > max_intx) {
-   THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(),
-              "Invalid threshold value > max value of size_t",
-              -1);
+ if ((size_t)threshold > max_uintx) {
+   stringStream st;
+   st.print("Invalid threshold value. Threshold value (" UINT64_FORMAT ") > max value of size_t (" SIZE_FORMAT ")", (size_t)threshold, max_uintx);
+   THROW_MSG_(vmSymbols::java_lang_IllegalArgumentException(), st.as_string(), -1);
  }
  MemoryPool* pool = get_memory_pool_from_jobject(obj, CHECK_(0L));
......