提交 25cac386 编写于 作者: A amurillo

Merge

...@@ -359,6 +359,12 @@ public class InstanceKlass extends Klass { ...@@ -359,6 +359,12 @@ public class InstanceKlass extends Klass {
public static final int innerClassNextOffset = 4; public static final int innerClassNextOffset = 4;
}; };
public static interface EnclosingMethodAttributeOffset {
public static final int enclosing_method_class_index_offset = 0;
public static final int enclosing_method_method_index_offset = 1;
public static final int enclosing_method_attribute_size = 2;
};
// refer to compute_modifier_flags in VM code. // refer to compute_modifier_flags in VM code.
public long computeModifierFlags() { public long computeModifierFlags() {
long access = getAccessFlags(); long access = getAccessFlags();
...@@ -367,9 +373,14 @@ public class InstanceKlass extends Klass { ...@@ -367,9 +373,14 @@ public class InstanceKlass extends Klass {
int length = ( innerClassList == null)? 0 : (int) innerClassList.getLength(); int length = ( innerClassList == null)? 0 : (int) innerClassList.getLength();
if (length > 0) { if (length > 0) {
if (Assert.ASSERTS_ENABLED) { if (Assert.ASSERTS_ENABLED) {
Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0, "just checking"); Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0 ||
length % InnerClassAttributeOffset.innerClassNextOffset == EnclosingMethodAttributeOffset.enclosing_method_attribute_size,
"just checking");
} }
for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) { for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) {
if (i == length - EnclosingMethodAttributeOffset.enclosing_method_attribute_size) {
break;
}
int ioff = innerClassList.getShortAt(i + int ioff = innerClassList.getShortAt(i +
InnerClassAttributeOffset.innerClassInnerClassInfoOffset); InnerClassAttributeOffset.innerClassInnerClassInfoOffset);
// 'ioff' can be zero. // 'ioff' can be zero.
...@@ -419,9 +430,14 @@ public class InstanceKlass extends Klass { ...@@ -419,9 +430,14 @@ public class InstanceKlass extends Klass {
int length = ( innerClassList == null)? 0 : (int) innerClassList.getLength(); int length = ( innerClassList == null)? 0 : (int) innerClassList.getLength();
if (length > 0) { if (length > 0) {
if (Assert.ASSERTS_ENABLED) { if (Assert.ASSERTS_ENABLED) {
Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0, "just checking"); Assert.that(length % InnerClassAttributeOffset.innerClassNextOffset == 0 ||
length % InnerClassAttributeOffset.innerClassNextOffset == EnclosingMethodAttributeOffset.enclosing_method_attribute_size,
"just checking");
} }
for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) { for (int i = 0; i < length; i += InnerClassAttributeOffset.innerClassNextOffset) {
if (i == length - EnclosingMethodAttributeOffset.enclosing_method_attribute_size) {
break;
}
int ioff = innerClassList.getShortAt(i + int ioff = innerClassList.getShortAt(i +
InnerClassAttributeOffset.innerClassInnerClassInfoOffset); InnerClassAttributeOffset.innerClassInnerClassInfoOffset);
// 'ioff' can be zero. // 'ioff' can be zero.
......
...@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011 ...@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2011
HS_MAJOR_VER=24 HS_MAJOR_VER=24
HS_MINOR_VER=0 HS_MINOR_VER=0
HS_BUILD_NUMBER=05 HS_BUILD_NUMBER=06
JDK_MAJOR_VER=1 JDK_MAJOR_VER=1
JDK_MINOR_VER=8 JDK_MINOR_VER=8
......
...@@ -446,6 +446,7 @@ jprt.test.targets.embedded= \ ...@@ -446,6 +446,7 @@ jprt.test.targets.embedded= \
jprt.test.targets.jdk8=${jprt.test.targets.standard} jprt.test.targets.jdk8=${jprt.test.targets.standard}
jprt.test.targets.jdk7=${jprt.test.targets.standard} jprt.test.targets.jdk7=${jprt.test.targets.standard}
jprt.test.targets.jdk7u4=${jprt.test.targets.jdk7}
jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}} jprt.test.targets=${jprt.test.targets.${jprt.tools.default.release}}
# The default test/Makefile targets that should be run # The default test/Makefile targets that should be run
...@@ -505,5 +506,6 @@ jprt.make.rule.test.targets.embedded = \ ...@@ -505,5 +506,6 @@ jprt.make.rule.test.targets.embedded = \
jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard} jprt.make.rule.test.targets.jdk8=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard} jprt.make.rule.test.targets.jdk7=${jprt.make.rule.test.targets.standard}
jprt.make.rule.test.targets.jdk7u4=${jprt.make.rule.test.targets.jdk7}
jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}} jprt.make.rule.test.targets=${jprt.make.rule.test.targets.${jprt.tools.default.release}}
...@@ -2315,13 +2315,32 @@ void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantP ...@@ -2315,13 +2315,32 @@ void ClassFileParser::parse_classfile_source_debug_extension_attribute(constantP
#define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC) #define RECOGNIZED_INNER_CLASS_MODIFIERS (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_PRIVATE | JVM_ACC_PROTECTED | JVM_ACC_STATIC)
// Return number of classes in the inner classes attribute table // Return number of classes in the inner classes attribute table
u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS) { u2 ClassFileParser::parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
bool parsed_enclosingmethod_attribute,
u2 enclosing_method_class_index,
u2 enclosing_method_method_index,
constantPoolHandle cp,
instanceKlassHandle k, TRAPS) {
ClassFileStream* cfs = stream(); ClassFileStream* cfs = stream();
cfs->guarantee_more(2, CHECK_0); // length u1* current_mark = cfs->current();
u2 length = cfs->get_u2_fast(); u2 length = 0;
if (inner_classes_attribute_start != NULL) {
cfs->set_current(inner_classes_attribute_start);
cfs->guarantee_more(2, CHECK_0); // length
length = cfs->get_u2_fast();
}
// 4-tuples of shorts [inner_class_info_index, outer_class_info_index, inner_name_index, inner_class_access_flags] // 4-tuples of shorts of inner classes data and 2 shorts of enclosing
typeArrayOop ic = oopFactory::new_permanent_shortArray(length*4, CHECK_0); // method data:
// [inner_class_info_index,
// outer_class_info_index,
// inner_name_index,
// inner_class_access_flags,
// ...
// enclosing_method_class_index,
// enclosing_method_method_index]
int size = length * 4 + (parsed_enclosingmethod_attribute ? 2 : 0);
typeArrayOop ic = oopFactory::new_permanent_shortArray(size, CHECK_0);
typeArrayHandle inner_classes(THREAD, ic); typeArrayHandle inner_classes(THREAD, ic);
int index = 0; int index = 0;
int cp_size = cp->length(); int cp_size = cp->length();
...@@ -2372,8 +2391,8 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle c ...@@ -2372,8 +2391,8 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle c
// 4347400: make sure there's no duplicate entry in the classes array // 4347400: make sure there's no duplicate entry in the classes array
if (_need_verify && _major_version >= JAVA_1_5_VERSION) { if (_need_verify && _major_version >= JAVA_1_5_VERSION) {
for(int i = 0; i < inner_classes->length(); i += 4) { for(int i = 0; i < length * 4; i += 4) {
for(int j = i + 4; j < inner_classes->length(); j += 4) { for(int j = i + 4; j < length * 4; j += 4) {
guarantee_property((inner_classes->ushort_at(i) != inner_classes->ushort_at(j) || guarantee_property((inner_classes->ushort_at(i) != inner_classes->ushort_at(j) ||
inner_classes->ushort_at(i+1) != inner_classes->ushort_at(j+1) || inner_classes->ushort_at(i+1) != inner_classes->ushort_at(j+1) ||
inner_classes->ushort_at(i+2) != inner_classes->ushort_at(j+2) || inner_classes->ushort_at(i+2) != inner_classes->ushort_at(j+2) ||
...@@ -2384,8 +2403,19 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle c ...@@ -2384,8 +2403,19 @@ u2 ClassFileParser::parse_classfile_inner_classes_attribute(constantPoolHandle c
} }
} }
// Set EnclosingMethod class and method indexes.
if (parsed_enclosingmethod_attribute) {
inner_classes->short_at_put(index++, enclosing_method_class_index);
inner_classes->short_at_put(index++, enclosing_method_method_index);
}
assert(index == size, "wrong size");
// Update instanceKlass with inner class info. // Update instanceKlass with inner class info.
k->set_inner_classes(inner_classes()); k->set_inner_classes(inner_classes());
// Restore buffer's current position.
cfs->set_current(current_mark);
return length; return length;
} }
...@@ -2490,6 +2520,10 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance ...@@ -2490,6 +2520,10 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
int runtime_visible_annotations_length = 0; int runtime_visible_annotations_length = 0;
u1* runtime_invisible_annotations = NULL; u1* runtime_invisible_annotations = NULL;
int runtime_invisible_annotations_length = 0; int runtime_invisible_annotations_length = 0;
u1* inner_classes_attribute_start = NULL;
u4 inner_classes_attribute_length = 0;
u2 enclosing_method_class_index = 0;
u2 enclosing_method_method_index = 0;
// Iterate over attributes // Iterate over attributes
while (attributes_count--) { while (attributes_count--) {
cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length cfs->guarantee_more(6, CHECK); // attribute_name_index, attribute_length
...@@ -2522,11 +2556,9 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance ...@@ -2522,11 +2556,9 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
} else { } else {
parsed_innerclasses_attribute = true; parsed_innerclasses_attribute = true;
} }
u2 num_of_classes = parse_classfile_inner_classes_attribute(cp, k, CHECK); inner_classes_attribute_start = cfs->get_u1_buffer();
if (_need_verify && _major_version >= JAVA_1_5_VERSION) { inner_classes_attribute_length = attribute_length;
guarantee_property(attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes, cfs->skip_u1(inner_classes_attribute_length, CHECK);
"Wrong InnerClasses attribute length in class file %s", CHECK);
}
} else if (tag == vmSymbols::tag_synthetic()) { } else if (tag == vmSymbols::tag_synthetic()) {
// Check for Synthetic tag // Check for Synthetic tag
// Shouldn't we check that the synthetic flags wasn't already set? - not required in spec // Shouldn't we check that the synthetic flags wasn't already set? - not required in spec
...@@ -2568,22 +2600,21 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance ...@@ -2568,22 +2600,21 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
parsed_enclosingmethod_attribute = true; parsed_enclosingmethod_attribute = true;
} }
cfs->guarantee_more(4, CHECK); // class_index, method_index cfs->guarantee_more(4, CHECK); // class_index, method_index
u2 class_index = cfs->get_u2_fast(); enclosing_method_class_index = cfs->get_u2_fast();
u2 method_index = cfs->get_u2_fast(); enclosing_method_method_index = cfs->get_u2_fast();
if (class_index == 0) { if (enclosing_method_class_index == 0) {
classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK); classfile_parse_error("Invalid class index in EnclosingMethod attribute in class file %s", CHECK);
} }
// Validate the constant pool indices and types // Validate the constant pool indices and types
if (!cp->is_within_bounds(class_index) || if (!cp->is_within_bounds(enclosing_method_class_index) ||
!is_klass_reference(cp, class_index)) { !is_klass_reference(cp, enclosing_method_class_index)) {
classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK); classfile_parse_error("Invalid or out-of-bounds class index in EnclosingMethod attribute in class file %s", CHECK);
} }
if (method_index != 0 && if (enclosing_method_method_index != 0 &&
(!cp->is_within_bounds(method_index) || (!cp->is_within_bounds(enclosing_method_method_index) ||
!cp->tag_at(method_index).is_name_and_type())) { !cp->tag_at(enclosing_method_method_index).is_name_and_type())) {
classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK); classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
} }
k->set_enclosing_method_indices(class_index, method_index);
} else if (tag == vmSymbols::tag_bootstrap_methods() && } else if (tag == vmSymbols::tag_bootstrap_methods() &&
_major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { _major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
if (parsed_bootstrap_methods_attribute) if (parsed_bootstrap_methods_attribute)
...@@ -2606,6 +2637,20 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance ...@@ -2606,6 +2637,20 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
CHECK); CHECK);
k->set_class_annotations(annotations()); k->set_class_annotations(annotations());
if (parsed_innerclasses_attribute || parsed_enclosingmethod_attribute) {
u2 num_of_classes = parse_classfile_inner_classes_attribute(
inner_classes_attribute_start,
parsed_innerclasses_attribute,
enclosing_method_class_index,
enclosing_method_method_index,
cp, k, CHECK);
if (parsed_innerclasses_attribute &&_need_verify && _major_version >= JAVA_1_5_VERSION) {
guarantee_property(
inner_classes_attribute_length == sizeof(num_of_classes) + 4 * sizeof(u2) * num_of_classes,
"Wrong InnerClasses attribute length in class file %s", CHECK);
}
}
if (_max_bootstrap_specifier_index >= 0) { if (_max_bootstrap_specifier_index >= 0) {
guarantee_property(parsed_bootstrap_methods_attribute, guarantee_property(parsed_bootstrap_methods_attribute,
"Missing BootstrapMethods attribute in class file %s", CHECK); "Missing BootstrapMethods attribute in class file %s", CHECK);
......
...@@ -130,7 +130,11 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC { ...@@ -130,7 +130,11 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS); void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
instanceKlassHandle k, int length, TRAPS); instanceKlassHandle k, int length, TRAPS);
u2 parse_classfile_inner_classes_attribute(constantPoolHandle cp, u2 parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
bool parsed_enclosingmethod_attribute,
u2 enclosing_method_class_index,
u2 enclosing_method_method_index,
constantPoolHandle cp,
instanceKlassHandle k, TRAPS); instanceKlassHandle k, TRAPS);
void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS); void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS); void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
......
...@@ -285,7 +285,7 @@ size_t CollectionSetChooser::calcMinOldCSetLength() { ...@@ -285,7 +285,7 @@ size_t CollectionSetChooser::calcMinOldCSetLength() {
// that the result is the same during all mixed GCs that follow a cycle. // that the result is the same during all mixed GCs that follow a cycle.
const size_t region_num = (size_t) _length; const size_t region_num = (size_t) _length;
const size_t gc_num = (size_t) G1MaxMixedGCNum; const size_t gc_num = (size_t) G1MixedGCCountTarget;
size_t result = region_num / gc_num; size_t result = region_num / gc_num;
// emulate ceiling // emulate ceiling
if (result * gc_num < region_num) { if (result * gc_num < region_num) {
......
...@@ -155,7 +155,7 @@ void ConcurrentMarkThread::run() { ...@@ -155,7 +155,7 @@ void ConcurrentMarkThread::run() {
CMCheckpointRootsFinalClosure final_cl(_cm); CMCheckpointRootsFinalClosure final_cl(_cm);
sprintf(verbose_str, "GC remark"); sprintf(verbose_str, "GC remark");
VM_CGC_Operation op(&final_cl, verbose_str); VM_CGC_Operation op(&final_cl, verbose_str, true /* needs_pll */);
VMThread::execute(&op); VMThread::execute(&op);
} }
if (cm()->restart_for_overflow() && if (cm()->restart_for_overflow() &&
...@@ -189,7 +189,7 @@ void ConcurrentMarkThread::run() { ...@@ -189,7 +189,7 @@ void ConcurrentMarkThread::run() {
CMCleanUp cl_cl(_cm); CMCleanUp cl_cl(_cm);
sprintf(verbose_str, "GC cleanup"); sprintf(verbose_str, "GC cleanup");
VM_CGC_Operation op(&cl_cl, verbose_str); VM_CGC_Operation op(&cl_cl, verbose_str, false /* needs_pll */);
VMThread::execute(&op); VMThread::execute(&op);
} else { } else {
// We don't want to update the marking status if a GC pause // We don't want to update the marking status if a GC pause
......
...@@ -993,7 +993,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, ...@@ -993,7 +993,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
// iteration (after taking the Heap_lock). // iteration (after taking the Heap_lock).
result = _mutator_alloc_region.attempt_allocation(word_size, result = _mutator_alloc_region.attempt_allocation(word_size,
false /* bot_updates */); false /* bot_updates */);
if (result != NULL ){ if (result != NULL) {
return result; return result;
} }
...@@ -2437,20 +2437,22 @@ void G1CollectedHeap::collect(GCCause::Cause cause) { ...@@ -2437,20 +2437,22 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
true, /* should_initiate_conc_mark */ true, /* should_initiate_conc_mark */
g1_policy()->max_pause_time_ms(), g1_policy()->max_pause_time_ms(),
cause); cause);
VMThread::execute(&op); VMThread::execute(&op);
if (!op.pause_succeeded()) { if (!op.pause_succeeded()) {
// Another GC got scheduled and prevented us from scheduling
// the initial-mark GC. It's unlikely that the GC that
// pre-empted us was also an initial-mark GC. So, we'll retry
// the initial-mark GC.
if (full_gc_count_before == total_full_collections()) { if (full_gc_count_before == total_full_collections()) {
retry_gc = true; retry_gc = op.should_retry_gc();
} else { } else {
// A Full GC happened while we were trying to schedule the // A Full GC happened while we were trying to schedule the
// initial-mark GC. No point in starting a new cycle given // initial-mark GC. No point in starting a new cycle given
// that the whole heap was collected anyway. // that the whole heap was collected anyway.
} }
if (retry_gc) {
if (GC_locker::is_active_and_needs_gc()) {
GC_locker::stall_until_clear();
}
}
} }
} else { } else {
if (cause == GCCause::_gc_locker if (cause == GCCause::_gc_locker
......
...@@ -2608,7 +2608,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str, ...@@ -2608,7 +2608,7 @@ bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes(); size_t reclaimable_bytes = cset_chooser->remainingReclaimableBytes();
size_t capacity_bytes = _g1->capacity(); size_t capacity_bytes = _g1->capacity();
double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes; double perc = (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
double threshold = (double) G1OldReclaimableThresholdPercent; double threshold = (double) G1HeapWastePercent;
if (perc < threshold) { if (perc < threshold) {
ergo_verbose4(ErgoMixedGCs, ergo_verbose4(ErgoMixedGCs,
false_action_str, false_action_str,
......
...@@ -940,10 +940,9 @@ public: ...@@ -940,10 +940,9 @@ public:
return _bytes_copied_during_gc; return _bytes_copied_during_gc;
} }
// Determine whether the next GC should be mixed. Called to determine // Determine whether there are candidate regions so that the
// whether to start mixed GCs or whether to carry on doing mixed // next GC should be mixed. The two action strings are used
// GCs. The two action strings are used in the ergo output when the // in the ergo output when the method returns true or false.
// method returns true or false.
bool next_gc_should_be_mixed(const char* true_action_str, bool next_gc_should_be_mixed(const char* true_action_str,
const char* false_action_str); const char* false_action_str);
......
...@@ -299,17 +299,16 @@ ...@@ -299,17 +299,16 @@
"Percentage (0-100) of the heap size to use as maximum " \ "Percentage (0-100) of the heap size to use as maximum " \
"young gen size.") \ "young gen size.") \
\ \
develop(uintx, G1OldCSetRegionLiveThresholdPercent, 95, \ develop(uintx, G1OldCSetRegionLiveThresholdPercent, 90, \
"Threshold for regions to be added to the collection set. " \ "Threshold for regions to be added to the collection set. " \
"Regions with more live bytes that this will not be collected.") \ "Regions with more live bytes that this will not be collected.") \
\ \
develop(uintx, G1OldReclaimableThresholdPercent, 1, \ product(uintx, G1HeapWastePercent, 5, \
"Threshold for the remaining old reclaimable bytes, expressed " \ "Amount of space, expressed as a percentage of the heap size, " \
"as a percentage of the heap size. If the old reclaimable bytes " \ "that G1 is willing not to collect to avoid expensive GCs.") \
"are under this we will not collect them with more mixed GCs.") \
\ \
develop(uintx, G1MaxMixedGCNum, 4, \ product(uintx, G1MixedGCCountTarget, 4, \
"The maximum desired number of mixed GCs after a marking cycle.") \ "The target number of mixed GCs after a marking cycle.") \
\ \
develop(uintx, G1OldCSetRegionThresholdPercent, 10, \ develop(uintx, G1OldCSetRegionThresholdPercent, 10, \
"An upper bound for the number of old CSet regions expressed " \ "An upper bound for the number of old CSet regions expressed " \
......
...@@ -34,7 +34,8 @@ ...@@ -34,7 +34,8 @@
VM_G1CollectForAllocation::VM_G1CollectForAllocation( VM_G1CollectForAllocation::VM_G1CollectForAllocation(
unsigned int gc_count_before, unsigned int gc_count_before,
size_t word_size) size_t word_size)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size) { : VM_G1OperationWithAllocRequest(gc_count_before, word_size,
GCCause::_allocation_failure) {
guarantee(word_size > 0, "an allocation should always be requested"); guarantee(word_size > 0, "an allocation should always be requested");
} }
...@@ -57,9 +58,10 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause( ...@@ -57,9 +58,10 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
bool should_initiate_conc_mark, bool should_initiate_conc_mark,
double target_pause_time_ms, double target_pause_time_ms,
GCCause::Cause gc_cause) GCCause::Cause gc_cause)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size), : VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
_should_initiate_conc_mark(should_initiate_conc_mark), _should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms), _target_pause_time_ms(target_pause_time_ms),
_should_retry_gc(false),
_full_collections_completed_before(0) { _full_collections_completed_before(0) {
guarantee(target_pause_time_ms > 0.0, guarantee(target_pause_time_ms > 0.0,
err_msg("target_pause_time_ms = %1.6lf should be positive", err_msg("target_pause_time_ms = %1.6lf should be positive",
...@@ -70,6 +72,22 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause( ...@@ -70,6 +72,22 @@ VM_G1IncCollectionPause::VM_G1IncCollectionPause(
_gc_cause = gc_cause; _gc_cause = gc_cause;
} }
bool VM_G1IncCollectionPause::doit_prologue() {
bool res = VM_GC_Operation::doit_prologue();
if (!res) {
if (_should_initiate_conc_mark) {
// The prologue can fail for a couple of reasons. The first is that another GC
// got scheduled and prevented the scheduling of the initial mark GC. The
// second is that the GC locker may be active and the heap can't be expanded.
// In both cases we want to retry the GC so that the initial mark pause is
// actually scheduled. In the second case, however, we should stall until
// until the GC locker is no longer active and then retry the initial mark GC.
_should_retry_gc = true;
}
}
return res;
}
void VM_G1IncCollectionPause::doit() { void VM_G1IncCollectionPause::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap(); G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(!_should_initiate_conc_mark || assert(!_should_initiate_conc_mark ||
...@@ -106,11 +124,25 @@ void VM_G1IncCollectionPause::doit() { ...@@ -106,11 +124,25 @@ void VM_G1IncCollectionPause::doit() {
// next GC pause to be an initial mark; it returns false if a // next GC pause to be an initial mark; it returns false if a
// marking cycle is already in progress. // marking cycle is already in progress.
// //
// If a marking cycle is already in progress just return and skip // If a marking cycle is already in progress just return and skip the
// the pause - the requesting thread should block in doit_epilogue // pause below - if the reason for requesting this initial mark pause
// until the marking cycle is complete. // was due to a System.gc() then the requesting thread should block in
// doit_epilogue() until the marking cycle is complete.
//
// If this initial mark pause was requested as part of a humongous
// allocation then we know that the marking cycle must just have
// been started by another thread (possibly also allocating a humongous
// object) as there was no active marking cycle when the requesting
// thread checked before calling collect() in
// attempt_allocation_humongous(). Retrying the GC, in this case,
// will cause the requesting thread to spin inside collect() until the
// just started marking cycle is complete - which may be a while. So
// we do NOT retry the GC.
if (!res) { if (!res) {
assert(_word_size == 0, "ExplicitGCInvokesConcurrent shouldn't be allocating"); assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
if (_gc_cause != GCCause::_g1_humongous_allocation) {
_should_retry_gc = true;
}
return; return;
} }
} }
...@@ -123,6 +155,13 @@ void VM_G1IncCollectionPause::doit() { ...@@ -123,6 +155,13 @@ void VM_G1IncCollectionPause::doit() {
true /* expect_null_cur_alloc_region */); true /* expect_null_cur_alloc_region */);
} else { } else {
assert(_result == NULL, "invariant"); assert(_result == NULL, "invariant");
if (!_pause_succeeded) {
// Another possible reason reason for the pause to not be successful
// is that, again, the GC locker is active (and has become active
// since the prologue was executed). In this case we should retry
// the pause after waiting for the GC locker to become inactive.
_should_retry_gc = true;
}
} }
} }
...@@ -168,6 +207,7 @@ void VM_G1IncCollectionPause::doit_epilogue() { ...@@ -168,6 +207,7 @@ void VM_G1IncCollectionPause::doit_epilogue() {
} }
void VM_CGC_Operation::acquire_pending_list_lock() { void VM_CGC_Operation::acquire_pending_list_lock() {
assert(_needs_pll, "don't call this otherwise");
// The caller may block while communicating // The caller may block while communicating
// with the SLT thread in order to acquire/release the PLL. // with the SLT thread in order to acquire/release the PLL.
ConcurrentMarkThread::slt()-> ConcurrentMarkThread::slt()->
...@@ -175,6 +215,7 @@ void VM_CGC_Operation::acquire_pending_list_lock() { ...@@ -175,6 +215,7 @@ void VM_CGC_Operation::acquire_pending_list_lock() {
} }
void VM_CGC_Operation::release_and_notify_pending_list_lock() { void VM_CGC_Operation::release_and_notify_pending_list_lock() {
assert(_needs_pll, "don't call this otherwise");
// The caller may block while communicating // The caller may block while communicating
// with the SLT thread in order to acquire/release the PLL. // with the SLT thread in order to acquire/release the PLL.
ConcurrentMarkThread::slt()-> ConcurrentMarkThread::slt()->
...@@ -198,7 +239,9 @@ void VM_CGC_Operation::doit() { ...@@ -198,7 +239,9 @@ void VM_CGC_Operation::doit() {
bool VM_CGC_Operation::doit_prologue() { bool VM_CGC_Operation::doit_prologue() {
// Note the relative order of the locks must match that in // Note the relative order of the locks must match that in
// VM_GC_Operation::doit_prologue() or deadlocks can occur // VM_GC_Operation::doit_prologue() or deadlocks can occur
acquire_pending_list_lock(); if (_needs_pll) {
acquire_pending_list_lock();
}
Heap_lock->lock(); Heap_lock->lock();
SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true; SharedHeap::heap()->_thread_holds_heap_lock_for_gc = true;
...@@ -210,5 +253,7 @@ void VM_CGC_Operation::doit_epilogue() { ...@@ -210,5 +253,7 @@ void VM_CGC_Operation::doit_epilogue() {
// VM_GC_Operation::doit_epilogue() // VM_GC_Operation::doit_epilogue()
SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false; SharedHeap::heap()->_thread_holds_heap_lock_for_gc = false;
Heap_lock->unlock(); Heap_lock->unlock();
release_and_notify_pending_list_lock(); if (_needs_pll) {
release_and_notify_pending_list_lock();
}
} }
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -43,8 +43,9 @@ protected: ...@@ -43,8 +43,9 @@ protected:
public: public:
VM_G1OperationWithAllocRequest(unsigned int gc_count_before, VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
size_t word_size) size_t word_size,
: VM_GC_Operation(gc_count_before, GCCause::_allocation_failure), GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause),
_word_size(word_size), _result(NULL), _pause_succeeded(false) { } _word_size(word_size), _result(NULL), _pause_succeeded(false) { }
HeapWord* result() { return _result; } HeapWord* result() { return _result; }
bool pause_succeeded() { return _pause_succeeded; } bool pause_succeeded() { return _pause_succeeded; }
...@@ -77,6 +78,7 @@ public: ...@@ -77,6 +78,7 @@ public:
class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest { class VM_G1IncCollectionPause: public VM_G1OperationWithAllocRequest {
private: private:
bool _should_initiate_conc_mark; bool _should_initiate_conc_mark;
bool _should_retry_gc;
double _target_pause_time_ms; double _target_pause_time_ms;
unsigned int _full_collections_completed_before; unsigned int _full_collections_completed_before;
public: public:
...@@ -86,11 +88,13 @@ public: ...@@ -86,11 +88,13 @@ public:
double target_pause_time_ms, double target_pause_time_ms,
GCCause::Cause gc_cause); GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; } virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
virtual bool doit_prologue();
virtual void doit(); virtual void doit();
virtual void doit_epilogue(); virtual void doit_epilogue();
virtual const char* name() const { virtual const char* name() const {
return "garbage-first incremental collection pause"; return "garbage-first incremental collection pause";
} }
bool should_retry_gc() const { return _should_retry_gc; }
}; };
// Concurrent GC stop-the-world operations such as remark and cleanup; // Concurrent GC stop-the-world operations such as remark and cleanup;
...@@ -98,6 +102,7 @@ public: ...@@ -98,6 +102,7 @@ public:
class VM_CGC_Operation: public VM_Operation { class VM_CGC_Operation: public VM_Operation {
VoidClosure* _cl; VoidClosure* _cl;
const char* _printGCMessage; const char* _printGCMessage;
bool _needs_pll;
protected: protected:
// java.lang.ref.Reference support // java.lang.ref.Reference support
...@@ -105,8 +110,8 @@ protected: ...@@ -105,8 +110,8 @@ protected:
void release_and_notify_pending_list_lock(); void release_and_notify_pending_list_lock();
public: public:
VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg) VM_CGC_Operation(VoidClosure* cl, const char *printGCMsg, bool needs_pll)
: _cl(cl), _printGCMessage(printGCMsg) { } : _cl(cl), _printGCMessage(printGCMsg), _needs_pll(needs_pll) { }
virtual VMOp_Type type() const { return VMOp_CGC_Operation; } virtual VMOp_Type type() const { return VMOp_CGC_Operation; }
virtual void doit(); virtual void doit();
virtual bool doit_prologue(); virtual bool doit_prologue();
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONMANAGER_INLINE_HPP
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.hpp" #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp"
......
/* /*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2006, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -91,29 +91,37 @@ void MutableNUMASpace::ensure_parsability() { ...@@ -91,29 +91,37 @@ void MutableNUMASpace::ensure_parsability() {
MutableSpace *s = ls->space(); MutableSpace *s = ls->space();
if (s->top() < top()) { // For all spaces preceding the one containing top() if (s->top() < top()) { // For all spaces preceding the one containing top()
if (s->free_in_words() > 0) { if (s->free_in_words() > 0) {
size_t area_touched_words = pointer_delta(s->end(), s->top()); intptr_t cur_top = (intptr_t)s->top();
CollectedHeap::fill_with_object(s->top(), area_touched_words); size_t words_left_to_fill = pointer_delta(s->end(), s->top());;
while (words_left_to_fill > 0) {
size_t words_to_fill = MIN2(words_left_to_fill, CollectedHeap::filler_array_max_size());
assert(words_to_fill >= CollectedHeap::min_fill_size(),
err_msg("Remaining size ("SIZE_FORMAT ") is too small to fill (based on " SIZE_FORMAT " and " SIZE_FORMAT ")",
words_to_fill, words_left_to_fill, CollectedHeap::filler_array_max_size()));
CollectedHeap::fill_with_object((HeapWord*)cur_top, words_to_fill);
if (!os::numa_has_static_binding()) {
size_t touched_words = words_to_fill;
#ifndef ASSERT #ifndef ASSERT
if (!ZapUnusedHeapArea) { if (!ZapUnusedHeapArea) {
area_touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)), touched_words = MIN2((size_t)align_object_size(typeArrayOopDesc::header_size(T_INT)),
area_touched_words); touched_words);
} }
#endif #endif
if (!os::numa_has_static_binding()) { MemRegion invalid;
MemRegion invalid; HeapWord *crossing_start = (HeapWord*)round_to(cur_top, os::vm_page_size());
HeapWord *crossing_start = (HeapWord*)round_to((intptr_t)s->top(), os::vm_page_size()); HeapWord *crossing_end = (HeapWord*)round_to(cur_top + touched_words, os::vm_page_size());
HeapWord *crossing_end = (HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), if (crossing_start != crossing_end) {
os::vm_page_size()); // If object header crossed a small page boundary we mark the area
if (crossing_start != crossing_end) { // as invalid rounding it to a page_size().
// If object header crossed a small page boundary we mark the area HeapWord *start = MAX2((HeapWord*)round_down(cur_top, page_size()), s->bottom());
// as invalid rounding it to a page_size(). HeapWord *end = MIN2((HeapWord*)round_to(cur_top + touched_words, page_size()), s->end());
HeapWord *start = MAX2((HeapWord*)round_down((intptr_t)s->top(), page_size()), s->bottom()); invalid = MemRegion(start, end);
HeapWord *end = MIN2((HeapWord*)round_to((intptr_t)(s->top() + area_touched_words), page_size()), }
s->end());
invalid = MemRegion(start, end);
}
ls->add_invalid_region(invalid); ls->add_invalid_region(invalid);
}
cur_top = cur_top + (words_to_fill * HeapWordSize);
words_left_to_fill -= words_to_fill;
} }
} }
} else { } else {
......
...@@ -85,7 +85,7 @@ CollectedHeap::CollectedHeap() : _n_par_threads(0) ...@@ -85,7 +85,7 @@ CollectedHeap::CollectedHeap() : _n_par_threads(0)
const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT)); const size_t max_len = size_t(arrayOopDesc::max_array_length(T_INT));
const size_t elements_per_word = HeapWordSize / sizeof(jint); const size_t elements_per_word = HeapWordSize / sizeof(jint);
_filler_array_max_size = align_object_size(filler_array_hdr_size() + _filler_array_max_size = align_object_size(filler_array_hdr_size() +
max_len * elements_per_word); max_len / elements_per_word);
_barrier_set = NULL; _barrier_set = NULL;
_is_gc_active = false; _is_gc_active = false;
...@@ -303,10 +303,6 @@ size_t CollectedHeap::filler_array_min_size() { ...@@ -303,10 +303,6 @@ size_t CollectedHeap::filler_array_min_size() {
return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment return align_object_size(filler_array_hdr_size()); // align to MinObjAlignment
} }
size_t CollectedHeap::filler_array_max_size() {
return _filler_array_max_size;
}
#ifdef ASSERT #ifdef ASSERT
void CollectedHeap::fill_args_check(HeapWord* start, size_t words) void CollectedHeap::fill_args_check(HeapWord* start, size_t words)
{ {
...@@ -333,10 +329,11 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap) ...@@ -333,10 +329,11 @@ CollectedHeap::fill_with_array(HeapWord* start, size_t words, bool zap)
const size_t payload_size = words - filler_array_hdr_size(); const size_t payload_size = words - filler_array_hdr_size();
const size_t len = payload_size * HeapWordSize / sizeof(jint); const size_t len = payload_size * HeapWordSize / sizeof(jint);
assert((int)len >= 0, err_msg("size too large " SIZE_FORMAT " becomes %d", words, (int)len));
// Set the length first for concurrent GC. // Set the length first for concurrent GC.
((arrayOop)start)->set_length((int)len); ((arrayOop)start)->set_length((int)len);
post_allocation_setup_common(Universe::intArrayKlassObj(), start, words); post_allocation_setup_common(Universe::intArrayKlassObj(), start);
DEBUG_ONLY(zap_filler_array(start, words, zap);) DEBUG_ONLY(zap_filler_array(start, words, zap);)
} }
...@@ -349,8 +346,7 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap) ...@@ -349,8 +346,7 @@ CollectedHeap::fill_with_object_impl(HeapWord* start, size_t words, bool zap)
fill_with_array(start, words, zap); fill_with_array(start, words, zap);
} else if (words > 0) { } else if (words > 0) {
assert(words == min_fill_size(), "unaligned size"); assert(words == min_fill_size(), "unaligned size");
post_allocation_setup_common(SystemDictionary::Object_klass(), start, post_allocation_setup_common(SystemDictionary::Object_klass(), start);
words);
} }
} }
...@@ -480,7 +476,7 @@ oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle r ...@@ -480,7 +476,7 @@ oop CollectedHeap::Class_obj_allocate(KlassHandle klass, int size, KlassHandle r
assert(ScavengeRootsInCode > 0, "must be"); assert(ScavengeRootsInCode > 0, "must be");
obj = common_mem_allocate_init(size, CHECK_NULL); obj = common_mem_allocate_init(size, CHECK_NULL);
} }
post_allocation_setup_common(klass, obj, size); post_allocation_setup_common(klass, obj);
assert(Universe::is_bootstrapping() || assert(Universe::is_bootstrapping() ||
!((oop)obj)->blueprint()->oop_is_array(), "must not be an array"); !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
......
...@@ -128,7 +128,6 @@ class CollectedHeap : public CHeapObj { ...@@ -128,7 +128,6 @@ class CollectedHeap : public CHeapObj {
// Reinitialize tlabs before resuming mutators. // Reinitialize tlabs before resuming mutators.
virtual void resize_all_tlabs(); virtual void resize_all_tlabs();
protected:
// Allocate from the current thread's TLAB, with broken-out slow path. // Allocate from the current thread's TLAB, with broken-out slow path.
inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size); inline static HeapWord* allocate_from_tlab(Thread* thread, size_t size);
static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size); static HeapWord* allocate_from_tlab_slow(Thread* thread, size_t size);
...@@ -150,18 +149,14 @@ class CollectedHeap : public CHeapObj { ...@@ -150,18 +149,14 @@ class CollectedHeap : public CHeapObj {
inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS); inline static HeapWord* common_permanent_mem_allocate_init(size_t size, TRAPS);
// Helper functions for (VM) allocation. // Helper functions for (VM) allocation.
inline static void post_allocation_setup_common(KlassHandle klass, inline static void post_allocation_setup_common(KlassHandle klass, HeapWord* obj);
HeapWord* obj, size_t size);
inline static void post_allocation_setup_no_klass_install(KlassHandle klass, inline static void post_allocation_setup_no_klass_install(KlassHandle klass,
HeapWord* objPtr, HeapWord* objPtr);
size_t size);
inline static void post_allocation_setup_obj(KlassHandle klass, inline static void post_allocation_setup_obj(KlassHandle klass, HeapWord* obj);
HeapWord* obj, size_t size);
inline static void post_allocation_setup_array(KlassHandle klass, inline static void post_allocation_setup_array(KlassHandle klass,
HeapWord* obj, size_t size, HeapWord* obj, int length);
int length);
// Clears an allocated object. // Clears an allocated object.
inline static void init_obj(HeapWord* obj, size_t size); inline static void init_obj(HeapWord* obj, size_t size);
...@@ -169,7 +164,6 @@ class CollectedHeap : public CHeapObj { ...@@ -169,7 +164,6 @@ class CollectedHeap : public CHeapObj {
// Filler object utilities. // Filler object utilities.
static inline size_t filler_array_hdr_size(); static inline size_t filler_array_hdr_size();
static inline size_t filler_array_min_size(); static inline size_t filler_array_min_size();
static inline size_t filler_array_max_size();
DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);) DEBUG_ONLY(static void fill_args_check(HeapWord* start, size_t words);)
DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);) DEBUG_ONLY(static void zap_filler_array(HeapWord* start, size_t words, bool zap = true);)
...@@ -197,6 +191,10 @@ class CollectedHeap : public CHeapObj { ...@@ -197,6 +191,10 @@ class CollectedHeap : public CHeapObj {
G1CollectedHeap G1CollectedHeap
}; };
static inline size_t filler_array_max_size() {
return _filler_array_max_size;
}
virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; } virtual CollectedHeap::Name kind() const { return CollectedHeap::Abstract; }
/** /**
...@@ -366,9 +364,7 @@ class CollectedHeap : public CHeapObj { ...@@ -366,9 +364,7 @@ class CollectedHeap : public CHeapObj {
inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass, inline static oop permanent_obj_allocate_no_klass_install(KlassHandle klass,
int size, int size,
TRAPS); TRAPS);
inline static void post_allocation_install_obj_klass(KlassHandle klass, inline static void post_allocation_install_obj_klass(KlassHandle klass, oop obj);
oop obj,
int size);
inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS); inline static oop permanent_array_allocate(KlassHandle klass, int size, int length, TRAPS);
// Raw memory allocation facilities // Raw memory allocation facilities
...@@ -662,9 +658,6 @@ class CollectedHeap : public CHeapObj { ...@@ -662,9 +658,6 @@ class CollectedHeap : public CHeapObj {
} }
} }
// Allocate GCHeapLog during VM startup
static void initialize_heap_log();
// Heap verification // Heap verification
virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0; virtual void verify(bool allow_dirty, bool silent, VerifyOption option) = 0;
......
/* /*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -50,15 +50,13 @@ ...@@ -50,15 +50,13 @@
// Inline allocation implementations. // Inline allocation implementations.
void CollectedHeap::post_allocation_setup_common(KlassHandle klass, void CollectedHeap::post_allocation_setup_common(KlassHandle klass,
HeapWord* obj, HeapWord* obj) {
size_t size) { post_allocation_setup_no_klass_install(klass, obj);
post_allocation_setup_no_klass_install(klass, obj, size); post_allocation_install_obj_klass(klass, oop(obj));
post_allocation_install_obj_klass(klass, oop(obj), (int) size);
} }
void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass, void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
HeapWord* objPtr, HeapWord* objPtr) {
size_t size) {
oop obj = (oop)objPtr; oop obj = (oop)objPtr;
assert(obj != NULL, "NULL object pointer"); assert(obj != NULL, "NULL object pointer");
...@@ -71,8 +69,7 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass, ...@@ -71,8 +69,7 @@ void CollectedHeap::post_allocation_setup_no_klass_install(KlassHandle klass,
} }
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass, void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass,
oop obj, oop obj) {
int size) {
// These asserts are kind of complicated because of klassKlass // These asserts are kind of complicated because of klassKlass
// and the beginning of the world. // and the beginning of the world.
assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass"); assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
...@@ -101,9 +98,8 @@ inline void post_allocation_notify(KlassHandle klass, oop obj) { ...@@ -101,9 +98,8 @@ inline void post_allocation_notify(KlassHandle klass, oop obj) {
} }
void CollectedHeap::post_allocation_setup_obj(KlassHandle klass, void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
HeapWord* obj, HeapWord* obj) {
size_t size) { post_allocation_setup_common(klass, obj);
post_allocation_setup_common(klass, obj, size);
assert(Universe::is_bootstrapping() || assert(Universe::is_bootstrapping() ||
!((oop)obj)->blueprint()->oop_is_array(), "must not be an array"); !((oop)obj)->blueprint()->oop_is_array(), "must not be an array");
// notify jvmti and dtrace // notify jvmti and dtrace
...@@ -112,14 +108,13 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass, ...@@ -112,14 +108,13 @@ void CollectedHeap::post_allocation_setup_obj(KlassHandle klass,
void CollectedHeap::post_allocation_setup_array(KlassHandle klass, void CollectedHeap::post_allocation_setup_array(KlassHandle klass,
HeapWord* obj, HeapWord* obj,
size_t size,
int length) { int length) {
// Set array length before setting the _klass field // Set array length before setting the _klass field
// in post_allocation_setup_common() because the klass field // in post_allocation_setup_common() because the klass field
// indicates that the object is parsable by concurrent GC. // indicates that the object is parsable by concurrent GC.
assert(length >= 0, "length should be non-negative"); assert(length >= 0, "length should be non-negative");
((arrayOop)obj)->set_length(length); ((arrayOop)obj)->set_length(length);
post_allocation_setup_common(klass, obj, size); post_allocation_setup_common(klass, obj);
assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array"); assert(((oop)obj)->blueprint()->oop_is_array(), "must be an array");
// notify jvmti and dtrace (must be after length is set for dtrace) // notify jvmti and dtrace (must be after length is set for dtrace)
post_allocation_notify(klass, (oop)obj); post_allocation_notify(klass, (oop)obj);
...@@ -256,7 +251,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) { ...@@ -256,7 +251,7 @@ oop CollectedHeap::obj_allocate(KlassHandle klass, int size, TRAPS) {
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t"); assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_obj(klass, obj, size); post_allocation_setup_obj(klass, obj);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj; return (oop)obj;
} }
...@@ -269,7 +264,7 @@ oop CollectedHeap::array_allocate(KlassHandle klass, ...@@ -269,7 +264,7 @@ oop CollectedHeap::array_allocate(KlassHandle klass,
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t"); assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL); HeapWord* obj = common_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_array(klass, obj, size, length); post_allocation_setup_array(klass, obj, length);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj; return (oop)obj;
} }
...@@ -283,7 +278,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass, ...@@ -283,7 +278,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
assert(size >= 0, "int won't convert to size_t"); assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL); HeapWord* obj = common_mem_allocate_noinit(size, CHECK_NULL);
((oop)obj)->set_klass_gap(0); ((oop)obj)->set_klass_gap(0);
post_allocation_setup_array(klass, obj, size, length); post_allocation_setup_array(klass, obj, length);
#ifndef PRODUCT #ifndef PRODUCT
const size_t hs = oopDesc::header_size()+1; const size_t hs = oopDesc::header_size()+1;
Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs); Universe::heap()->check_for_non_bad_heap_word_value(obj+hs, size-hs);
...@@ -293,7 +288,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass, ...@@ -293,7 +288,7 @@ oop CollectedHeap::array_allocate_nozero(KlassHandle klass,
oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) { oop CollectedHeap::permanent_obj_allocate(KlassHandle klass, int size, TRAPS) {
oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL); oop obj = permanent_obj_allocate_no_klass_install(klass, size, CHECK_NULL);
post_allocation_install_obj_klass(klass, obj, size); post_allocation_install_obj_klass(klass, obj);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj, NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value((HeapWord*) obj,
size)); size));
return obj; return obj;
...@@ -306,7 +301,7 @@ oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass, ...@@ -306,7 +301,7 @@ oop CollectedHeap::permanent_obj_allocate_no_klass_install(KlassHandle klass,
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t"); assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL); HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_no_klass_install(klass, obj, size); post_allocation_setup_no_klass_install(klass, obj);
#ifndef PRODUCT #ifndef PRODUCT
const size_t hs = oopDesc::header_size(); const size_t hs = oopDesc::header_size();
Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs); Universe::heap()->check_for_bad_heap_word_value(obj+hs, size-hs);
...@@ -322,7 +317,7 @@ oop CollectedHeap::permanent_array_allocate(KlassHandle klass, ...@@ -322,7 +317,7 @@ oop CollectedHeap::permanent_array_allocate(KlassHandle klass,
assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed"); assert(!Universe::heap()->is_gc_active(), "Allocation during gc not allowed");
assert(size >= 0, "int won't convert to size_t"); assert(size >= 0, "int won't convert to size_t");
HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL); HeapWord* obj = common_permanent_mem_allocate_init(size, CHECK_NULL);
post_allocation_setup_array(klass, obj, size, length); post_allocation_setup_array(klass, obj, length);
NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size)); NOT_PRODUCT(Universe::heap()->check_for_bad_heap_word_value(obj, size));
return (oop)obj; return (oop)obj;
} }
......
...@@ -297,16 +297,14 @@ public: ...@@ -297,16 +297,14 @@ public:
if (obj->blueprint()->oop_is_instanceKlass()) { if (obj->blueprint()->oop_is_instanceKlass()) {
instanceKlass* ik = instanceKlass::cast((klassOop)obj); instanceKlass* ik = instanceKlass::cast((klassOop)obj);
typeArrayOop inner_classes = ik->inner_classes(); instanceKlassHandle ik_h((klassOop)obj);
if (inner_classes != NULL) { InnerClassesIterator iter(ik_h);
constantPoolOop constants = ik->constants(); constantPoolOop constants = ik->constants();
int n = inner_classes->length(); for (; !iter.done(); iter.next()) {
for (int i = 0; i < n; i += instanceKlass::inner_class_next_offset) { int index = iter.inner_name_index();
int ioff = i + instanceKlass::inner_class_inner_name_offset;
int index = inner_classes->ushort_at(ioff); if (index != 0) {
if (index != 0) { _closure->do_symbol(constants->symbol_at_addr(index));
_closure->do_symbol(constants->symbol_at_addr(index));
}
} }
} }
} }
......
...@@ -1133,6 +1133,36 @@ JNIid* instanceKlass::jni_id_for(int offset) { ...@@ -1133,6 +1133,36 @@ JNIid* instanceKlass::jni_id_for(int offset) {
return probe; return probe;
} }
u2 instanceKlass::enclosing_method_data(int offset) {
typeArrayOop inner_class_list = inner_classes();
if (inner_class_list == NULL) {
return 0;
}
int length = inner_class_list->length();
if (length % inner_class_next_offset == 0) {
return 0;
} else {
int index = length - enclosing_method_attribute_size;
typeArrayHandle inner_class_list_h(inner_class_list);
assert(offset < enclosing_method_attribute_size, "invalid offset");
return inner_class_list_h->ushort_at(index + offset);
}
}
void instanceKlass::set_enclosing_method_indices(u2 class_index,
u2 method_index) {
typeArrayOop inner_class_list = inner_classes();
assert (inner_class_list != NULL, "_inner_classes list is not set up");
int length = inner_class_list->length();
if (length % inner_class_next_offset == enclosing_method_attribute_size) {
int index = length - enclosing_method_attribute_size;
typeArrayHandle inner_class_list_h(inner_class_list);
inner_class_list_h->ushort_at_put(
index + enclosing_method_class_index_offset, class_index);
inner_class_list_h->ushort_at_put(
index + enclosing_method_method_index_offset, method_index);
}
}
// Lookup or create a jmethodID. // Lookup or create a jmethodID.
// This code is called by the VMThread and JavaThreads so the // This code is called by the VMThread and JavaThreads so the
...@@ -2107,28 +2137,21 @@ jint instanceKlass::compute_modifier_flags(TRAPS) const { ...@@ -2107,28 +2137,21 @@ jint instanceKlass::compute_modifier_flags(TRAPS) const {
jint access = access_flags().as_int(); jint access = access_flags().as_int();
// But check if it happens to be member class. // But check if it happens to be member class.
typeArrayOop inner_class_list = inner_classes(); instanceKlassHandle ik(THREAD, k);
int length = (inner_class_list == NULL) ? 0 : inner_class_list->length(); InnerClassesIterator iter(ik);
assert (length % instanceKlass::inner_class_next_offset == 0, "just checking"); for (; !iter.done(); iter.next()) {
if (length > 0) { int ioff = iter.inner_class_info_index();
typeArrayHandle inner_class_list_h(THREAD, inner_class_list); // Inner class attribute can be zero, skip it.
instanceKlassHandle ik(THREAD, k); // Strange but true: JVM spec. allows null inner class refs.
for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) { if (ioff == 0) continue;
int ioff = inner_class_list_h->ushort_at(
i + instanceKlass::inner_class_inner_class_info_offset); // only look at classes that are already loaded
// since we are looking for the flags for our self.
// Inner class attribute can be zero, skip it. Symbol* inner_name = ik->constants()->klass_name_at(ioff);
// Strange but true: JVM spec. allows null inner class refs. if ((ik->name() == inner_name)) {
if (ioff == 0) continue; // This is really a member class.
access = iter.inner_access_flags();
// only look at classes that are already loaded break;
// since we are looking for the flags for our self.
Symbol* inner_name = ik->constants()->klass_name_at(ioff);
if ((ik->name() == inner_name)) {
// This is really a member class.
access = inner_class_list_h->ushort_at(i + instanceKlass::inner_class_access_flags_offset);
break;
}
} }
} }
// Remember to strip ACC_SUPER bit // Remember to strip ACC_SUPER bit
......
...@@ -188,7 +188,17 @@ class instanceKlass: public Klass { ...@@ -188,7 +188,17 @@ class instanceKlass: public Klass {
klassOop _host_klass; klassOop _host_klass;
// Class signers. // Class signers.
objArrayOop _signers; objArrayOop _signers;
// inner_classes attribute. // The InnerClasses attribute and EnclosingMethod attribute. The
// _inner_classes is an array of shorts. If the class has InnerClasses
// attribute, then the _inner_classes array begins with 4-tuples of shorts
// [inner_class_info_index, outer_class_info_index,
// inner_name_index, inner_class_access_flags] for the InnerClasses
// attribute. If the EnclosingMethod attribute exists, it occupies the
// last two shorts [class_index, method_index] of the array. If only
// the InnerClasses attribute exists, the _inner_classes array length is
// number_of_inner_classes * 4. If the class has both InnerClasses
// and EnclosingMethod attributes the _inner_classes array length is
// number_of_inner_classes * 4 + enclosing_method_attribute_size.
typeArrayOop _inner_classes; typeArrayOop _inner_classes;
// Implementors of this interface (not valid if it overflows) // Implementors of this interface (not valid if it overflows)
klassOop _implementors[implementors_limit]; klassOop _implementors[implementors_limit];
...@@ -251,8 +261,6 @@ class instanceKlass: public Klass { ...@@ -251,8 +261,6 @@ class instanceKlass: public Klass {
// Array of interesting part(s) of the previous version(s) of this // Array of interesting part(s) of the previous version(s) of this
// instanceKlass. See PreviousVersionWalker below. // instanceKlass. See PreviousVersionWalker below.
GrowableArray<PreviousVersionNode *>* _previous_versions; GrowableArray<PreviousVersionNode *>* _previous_versions;
u2 _enclosing_method_class_index; // Constant pool index for class of enclosing method, or 0 if none
u2 _enclosing_method_method_index; // Constant pool index for name and type of enclosing method, or 0 if none
// JVMTI fields can be moved to their own structure - see 6315920 // JVMTI fields can be moved to their own structure - see 6315920
unsigned char * _cached_class_file_bytes; // JVMTI: cached class file, before retransformable agent modified it in CFLH unsigned char * _cached_class_file_bytes; // JVMTI: cached class file, before retransformable agent modified it in CFLH
jint _cached_class_file_len; // JVMTI: length of above jint _cached_class_file_len; // JVMTI: length of above
...@@ -351,6 +359,12 @@ class instanceKlass: public Klass { ...@@ -351,6 +359,12 @@ class instanceKlass: public Klass {
inner_class_next_offset = 4 inner_class_next_offset = 4
}; };
enum EnclosingMethodAttributeOffset {
enclosing_method_class_index_offset = 0,
enclosing_method_method_index_offset = 1,
enclosing_method_attribute_size = 2
};
// method override check // method override check
bool is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS); bool is_override(methodHandle super_method, Handle targetclassloader, Symbol* targetclassname, TRAPS);
...@@ -533,11 +547,15 @@ class instanceKlass: public Klass { ...@@ -533,11 +547,15 @@ class instanceKlass: public Klass {
Symbol* generic_signature() const { return _generic_signature; } Symbol* generic_signature() const { return _generic_signature; }
void set_generic_signature(Symbol* sig) { _generic_signature = sig; } void set_generic_signature(Symbol* sig) { _generic_signature = sig; }
u2 enclosing_method_class_index() const { return _enclosing_method_class_index; } u2 enclosing_method_data(int offset);
u2 enclosing_method_method_index() const { return _enclosing_method_method_index; } u2 enclosing_method_class_index() {
return enclosing_method_data(enclosing_method_class_index_offset);
}
u2 enclosing_method_method_index() {
return enclosing_method_data(enclosing_method_method_index_offset);
}
void set_enclosing_method_indices(u2 class_index, void set_enclosing_method_indices(u2 class_index,
u2 method_index) { _enclosing_method_class_index = class_index; u2 method_index);
_enclosing_method_method_index = method_index; }
// jmethodID support // jmethodID support
static jmethodID get_jmethod_id(instanceKlassHandle ik_h, static jmethodID get_jmethod_id(instanceKlassHandle ik_h,
...@@ -1053,4 +1071,83 @@ class nmethodBucket: public CHeapObj { ...@@ -1053,4 +1071,83 @@ class nmethodBucket: public CHeapObj {
nmethod* get_nmethod() { return _nmethod; } nmethod* get_nmethod() { return _nmethod; }
}; };
// An iterator that's used to access the inner classes indices in the
// instanceKlass::_inner_classes array. The array stores InnerClasses
// attribute entries as 4-tuples of u2 values
// (inner_class_info, outer_class_info, inner_name, inner_access_flags),
// optionally followed by a 2-element EnclosingMethod suffix which this
// iterator hides from its callers.
class InnerClassesIterator : public StackObj {
 private:
  typeArrayHandle _inner_classes;  // handle to the raw u2 array; may wrap NULL
  int _length;                     // iterable length, excluding any EnclosingMethod suffix
  int _idx;                        // offset of the 4-tuple currently being visited
 public:
  // Binds the iterator to k's _inner_classes array and positions it at the
  // first 4-tuple. A klass without inner-class data iterates zero times.
  InnerClassesIterator(instanceKlassHandle k) {
    _inner_classes = k->inner_classes();
    if (k->inner_classes() != NULL) {
      _length = _inner_classes->length();
      // The inner class array's length should be the multiple of
      // inner_class_next_offset if it only contains the InnerClasses
      // attribute data, or it should be
      // n*inner_class_next_offset+enclosing_method_attribute_size
      // if it also contains the EnclosingMethod data.
      assert((_length % instanceKlass::inner_class_next_offset == 0 ||
              _length % instanceKlass::inner_class_next_offset == instanceKlass::enclosing_method_attribute_size),
             "just checking");
      // Remove the enclosing_method portion if exists.
      if (_length % instanceKlass::inner_class_next_offset == instanceKlass::enclosing_method_attribute_size) {
        _length -= instanceKlass::enclosing_method_attribute_size;
      }
    } else {
      _length = 0;
    }
    _idx = 0;
  }

  // Number of iterable u2 slots (a multiple of inner_class_next_offset);
  // 0 means the klass has no InnerClasses entries.
  int length() const {
    return _length;
  }

  // Advances to the next 4-tuple. Callers must check done() first.
  void next() {
    _idx += instanceKlass::inner_class_next_offset;
  }

  // Returns true once every 4-tuple has been visited.
  bool done() const {
    return (_idx >= _length);
  }

  // Constant-pool index of the inner class (CONSTANT_Class), or 0 if unset.
  u2 inner_class_info_index() const {
    return _inner_classes->ushort_at(
               _idx + instanceKlass::inner_class_inner_class_info_offset);
  }

  // Overwrites the inner class constant-pool index of the current entry
  // (used e.g. when class redefinition remaps constant-pool indices).
  void set_inner_class_info_index(u2 index) {
    _inner_classes->ushort_at_put(
               _idx + instanceKlass::inner_class_inner_class_info_offset, index);
  }

  // Constant-pool index of the enclosing (outer) class, or 0 if the inner
  // class is not a member.
  u2 outer_class_info_index() const {
    return _inner_classes->ushort_at(
               _idx + instanceKlass::inner_class_outer_class_info_offset);
  }

  // Overwrites the outer class constant-pool index of the current entry.
  void set_outer_class_info_index(u2 index) {
    _inner_classes->ushort_at_put(
               _idx + instanceKlass::inner_class_outer_class_info_offset, index);
  }

  // Constant-pool index of the inner class's simple name (CONSTANT_Utf8),
  // or 0 for an anonymous class.
  u2 inner_name_index() const {
    return _inner_classes->ushort_at(
               _idx + instanceKlass::inner_class_inner_name_offset);
  }

  // Overwrites the inner-name constant-pool index of the current entry.
  void set_inner_name_index(u2 index) {
    _inner_classes->ushort_at_put(
               _idx + instanceKlass::inner_class_inner_name_offset, index);
  }

  // Access flags of the inner class as declared in the attribute entry.
  // No setter is provided: flags are not remapped during redefinition.
  u2 inner_access_flags() const {
    return _inner_classes->ushort_at(
               _idx + instanceKlass::inner_class_access_flags_offset);
  }
};
#endif // SHARE_VM_OOPS_INSTANCEKLASS_HPP #endif // SHARE_VM_OOPS_INSTANCEKLASS_HPP
...@@ -416,7 +416,6 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it ...@@ -416,7 +416,6 @@ instanceKlassKlass::allocate_instance_klass(Symbol* name, int vtable_len, int it
ik->set_methods_annotations(NULL); ik->set_methods_annotations(NULL);
ik->set_methods_parameter_annotations(NULL); ik->set_methods_parameter_annotations(NULL);
ik->set_methods_default_annotations(NULL); ik->set_methods_default_annotations(NULL);
ik->set_enclosing_method_indices(0, 0);
ik->set_jvmti_cached_class_field_map(NULL); ik->set_jvmti_cached_class_field_map(NULL);
ik->set_initial_method_idnum(0); ik->set_initial_method_idnum(0);
assert(k()->is_parsable(), "should be parsable here."); assert(k()->is_parsable(), "should be parsable here.");
......
/* /*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -174,10 +174,9 @@ KlassHandle Klass::base_create_klass(KlassHandle& klass, int size, ...@@ -174,10 +174,9 @@ KlassHandle Klass::base_create_klass(KlassHandle& klass, int size,
} }
void Klass_vtbl::post_new_init_klass(KlassHandle& klass, void Klass_vtbl::post_new_init_klass(KlassHandle& klass,
klassOop new_klass, klassOop new_klass) const {
int size) const {
assert(!new_klass->klass_part()->null_vtbl(), "Not a complete klass"); assert(!new_klass->klass_part()->null_vtbl(), "Not a complete klass");
CollectedHeap::post_allocation_install_obj_klass(klass, new_klass, size); CollectedHeap::post_allocation_install_obj_klass(klass, new_klass);
} }
void* Klass_vtbl::operator new(size_t ignored, KlassHandle& klass, void* Klass_vtbl::operator new(size_t ignored, KlassHandle& klass,
......
/* /*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
* *
* This code is free software; you can redistribute it and/or modify it * This code is free software; you can redistribute it and/or modify it
...@@ -149,7 +149,7 @@ class Klass_vtbl { ...@@ -149,7 +149,7 @@ class Klass_vtbl {
// by the shared "base_create" subroutines. // by the shared "base_create" subroutines.
// //
virtual void* allocate_permanent(KlassHandle& klass, int size, TRAPS) const = 0; virtual void* allocate_permanent(KlassHandle& klass, int size, TRAPS) const = 0;
void post_new_init_klass(KlassHandle& klass, klassOop obj, int size) const; void post_new_init_klass(KlassHandle& klass, klassOop obj) const;
// Every subclass on which vtbl_value is called must include this macro. // Every subclass on which vtbl_value is called must include this macro.
// Delay the installation of the klassKlass pointer until after the // Delay the installation of the klassKlass pointer until after the
...@@ -160,7 +160,7 @@ class Klass_vtbl { ...@@ -160,7 +160,7 @@ class Klass_vtbl {
if (HAS_PENDING_EXCEPTION) return NULL; \ if (HAS_PENDING_EXCEPTION) return NULL; \
klassOop new_klass = ((Klass*) result)->as_klassOop(); \ klassOop new_klass = ((Klass*) result)->as_klassOop(); \
OrderAccess::storestore(); \ OrderAccess::storestore(); \
post_new_init_klass(klass_klass, new_klass, size); \ post_new_init_klass(klass_klass, new_klass); \
return result; \ return result; \
} }
......
...@@ -1301,9 +1301,6 @@ JVM_END ...@@ -1301,9 +1301,6 @@ JVM_END
// Inner class reflection /////////////////////////////////////////////////////////////////////////////// // Inner class reflection ///////////////////////////////////////////////////////////////////////////////
JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass)) JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
const int inner_class_info_index = 0;
const int outer_class_info_index = 1;
JvmtiVMObjectAllocEventCollector oam; JvmtiVMObjectAllocEventCollector oam;
// ofClass is a reference to a java_lang_Class object. The mirror object // ofClass is a reference to a java_lang_Class object. The mirror object
// of an instanceKlass // of an instanceKlass
...@@ -1315,26 +1312,26 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass)) ...@@ -1315,26 +1312,26 @@ JVM_ENTRY(jobjectArray, JVM_GetDeclaredClasses(JNIEnv *env, jclass ofClass))
} }
instanceKlassHandle k(thread, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass))); instanceKlassHandle k(thread, java_lang_Class::as_klassOop(JNIHandles::resolve_non_null(ofClass)));
InnerClassesIterator iter(k);
if (k->inner_classes()->length() == 0) { if (iter.length() == 0) {
// Neither an inner nor outer class // Neither an inner nor outer class
oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL); oop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), 0, CHECK_NULL);
return (jobjectArray)JNIHandles::make_local(env, result); return (jobjectArray)JNIHandles::make_local(env, result);
} }
// find inner class info // find inner class info
typeArrayHandle icls(thread, k->inner_classes());
constantPoolHandle cp(thread, k->constants()); constantPoolHandle cp(thread, k->constants());
int length = icls->length(); int length = iter.length();
// Allocate temp. result array // Allocate temp. result array
objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL); objArrayOop r = oopFactory::new_objArray(SystemDictionary::Class_klass(), length/4, CHECK_NULL);
objArrayHandle result (THREAD, r); objArrayHandle result (THREAD, r);
int members = 0; int members = 0;
for(int i = 0; i < length; i += 4) { for (; !iter.done(); iter.next()) {
int ioff = icls->ushort_at(i + inner_class_info_index); int ioff = iter.inner_class_info_index();
int ooff = icls->ushort_at(i + outer_class_info_index); int ooff = iter.outer_class_info_index();
if (ioff != 0 && ooff != 0) { if (ioff != 0 && ooff != 0) {
// Check to see if the name matches the class we're looking for // Check to see if the name matches the class we're looking for
...@@ -1392,17 +1389,13 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k, ...@@ -1392,17 +1389,13 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
bool* inner_is_member, bool* inner_is_member,
TRAPS) { TRAPS) {
Thread* thread = THREAD; Thread* thread = THREAD;
const int inner_class_info_index = inner_class_inner_class_info_offset; InnerClassesIterator iter(k);
const int outer_class_info_index = inner_class_outer_class_info_offset; if (iter.length() == 0) {
if (k->inner_classes()->length() == 0) {
// No inner class info => no declaring class // No inner class info => no declaring class
return NULL; return NULL;
} }
typeArrayHandle i_icls(thread, k->inner_classes());
constantPoolHandle i_cp(thread, k->constants()); constantPoolHandle i_cp(thread, k->constants());
int i_length = i_icls->length();
bool found = false; bool found = false;
klassOop ok; klassOop ok;
...@@ -1410,10 +1403,10 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k, ...@@ -1410,10 +1403,10 @@ klassOop instanceKlass::compute_enclosing_class_impl(instanceKlassHandle k,
*inner_is_member = false; *inner_is_member = false;
// Find inner_klass attribute // Find inner_klass attribute
for (int i = 0; i < i_length && !found; i += inner_class_next_offset) { for (; !iter.done() && !found; iter.next()) {
int ioff = i_icls->ushort_at(i + inner_class_info_index); int ioff = iter.inner_class_info_index();
int ooff = i_icls->ushort_at(i + outer_class_info_index); int ooff = iter.outer_class_info_index();
int noff = i_icls->ushort_at(i + inner_class_inner_name_offset); int noff = iter.inner_name_index();
if (ioff != 0) { if (ioff != 0) {
// Check to see if the name matches the class we're looking for // Check to see if the name matches the class we're looking for
// before attempting to find the class. // before attempting to find the class.
......
...@@ -292,8 +292,8 @@ void JvmtiClassFileReconstituter::write_signature_attribute(u2 generic_signature ...@@ -292,8 +292,8 @@ void JvmtiClassFileReconstituter::write_signature_attribute(u2 generic_signature
// Compute the number of entries in the InnerClasses attribute // Compute the number of entries in the InnerClasses attribute
u2 JvmtiClassFileReconstituter::inner_classes_attribute_length() { u2 JvmtiClassFileReconstituter::inner_classes_attribute_length() {
typeArrayOop inner_class_list = ikh()->inner_classes(); InnerClassesIterator iter(ikh());
return (inner_class_list == NULL) ? 0 : inner_class_list->length(); return iter.length();
} }
// Write an annotation attribute. The VM stores them in raw form, so all we need // Write an annotation attribute. The VM stores them in raw form, so all we need
...@@ -324,26 +324,20 @@ void JvmtiClassFileReconstituter::write_annotations_attribute(const char* attr_n ...@@ -324,26 +324,20 @@ void JvmtiClassFileReconstituter::write_annotations_attribute(const char* attr_n
// JVMSpec| } classes[number_of_classes]; // JVMSpec| } classes[number_of_classes];
// JVMSpec| } // JVMSpec| }
void JvmtiClassFileReconstituter::write_inner_classes_attribute(int length) { void JvmtiClassFileReconstituter::write_inner_classes_attribute(int length) {
typeArrayOop inner_class_list = ikh()->inner_classes(); InnerClassesIterator iter(ikh());
guarantee(inner_class_list != NULL && inner_class_list->length() == length, guarantee(iter.length() != 0 && iter.length() == length,
"caller must check"); "caller must check");
typeArrayHandle inner_class_list_h(thread(), inner_class_list);
assert (length % instanceKlass::inner_class_next_offset == 0, "just checking");
u2 entry_count = length / instanceKlass::inner_class_next_offset; u2 entry_count = length / instanceKlass::inner_class_next_offset;
u4 size = 2 + entry_count * (2+2+2+2); u4 size = 2 + entry_count * (2+2+2+2);
write_attribute_name_index("InnerClasses"); write_attribute_name_index("InnerClasses");
write_u4(size); write_u4(size);
write_u2(entry_count); write_u2(entry_count);
for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) { for (; !iter.done(); iter.next()) {
write_u2(inner_class_list_h->ushort_at( write_u2(iter.inner_class_info_index());
i + instanceKlass::inner_class_inner_class_info_offset)); write_u2(iter.outer_class_info_index());
write_u2(inner_class_list_h->ushort_at( write_u2(iter.inner_name_index());
i + instanceKlass::inner_class_outer_class_info_offset)); write_u2(iter.inner_access_flags());
write_u2(inner_class_list_h->ushort_at(
i + instanceKlass::inner_class_inner_name_offset));
write_u2(inner_class_list_h->ushort_at(
i + instanceKlass::inner_class_access_flags_offset));
} }
} }
......
...@@ -2400,44 +2400,33 @@ void VM_RedefineClasses::set_new_constant_pool( ...@@ -2400,44 +2400,33 @@ void VM_RedefineClasses::set_new_constant_pool(
// new constant indices as needed. The inner classes info is a // new constant indices as needed. The inner classes info is a
// quadruple: // quadruple:
// (inner_class_info, outer_class_info, inner_name, inner_access_flags) // (inner_class_info, outer_class_info, inner_name, inner_access_flags)
typeArrayOop inner_class_list = scratch_class->inner_classes(); InnerClassesIterator iter(scratch_class);
int icl_length = (inner_class_list == NULL) ? 0 : inner_class_list->length(); for (; !iter.done(); iter.next()) {
if (icl_length > 0) { int cur_index = iter.inner_class_info_index();
typeArrayHandle inner_class_list_h(THREAD, inner_class_list); if (cur_index == 0) {
for (int i = 0; i < icl_length; continue; // JVM spec. allows null inner class refs so skip it
i += instanceKlass::inner_class_next_offset) { }
int cur_index = inner_class_list_h->ushort_at(i int new_index = find_new_index(cur_index);
+ instanceKlass::inner_class_inner_class_info_offset); if (new_index != 0) {
if (cur_index == 0) { RC_TRACE_WITH_THREAD(0x00080000, THREAD,
continue; // JVM spec. allows null inner class refs so skip it ("inner_class_info change: %d to %d", cur_index, new_index));
} iter.set_inner_class_info_index(new_index);
int new_index = find_new_index(cur_index); }
if (new_index != 0) { cur_index = iter.outer_class_info_index();
RC_TRACE_WITH_THREAD(0x00080000, THREAD, new_index = find_new_index(cur_index);
("inner_class_info change: %d to %d", cur_index, new_index)); if (new_index != 0) {
inner_class_list_h->ushort_at_put(i RC_TRACE_WITH_THREAD(0x00080000, THREAD,
+ instanceKlass::inner_class_inner_class_info_offset, new_index); ("outer_class_info change: %d to %d", cur_index, new_index));
} iter.set_outer_class_info_index(new_index);
cur_index = inner_class_list_h->ushort_at(i }
+ instanceKlass::inner_class_outer_class_info_offset); cur_index = iter.inner_name_index();
new_index = find_new_index(cur_index); new_index = find_new_index(cur_index);
if (new_index != 0) { if (new_index != 0) {
RC_TRACE_WITH_THREAD(0x00080000, THREAD, RC_TRACE_WITH_THREAD(0x00080000, THREAD,
("outer_class_info change: %d to %d", cur_index, new_index)); ("inner_name change: %d to %d", cur_index, new_index));
inner_class_list_h->ushort_at_put(i iter.set_inner_name_index(new_index);
+ instanceKlass::inner_class_outer_class_info_offset, new_index); }
} } // end for each inner class
cur_index = inner_class_list_h->ushort_at(i
+ instanceKlass::inner_class_inner_name_offset);
new_index = find_new_index(cur_index);
if (new_index != 0) {
RC_TRACE_WITH_THREAD(0x00080000, THREAD,
("inner_name change: %d to %d", cur_index, new_index));
inner_class_list_h->ushort_at_put(i
+ instanceKlass::inner_class_inner_name_offset, new_index);
}
} // end for each inner class
} // end if we have inner classes
// Attach each method in klass to the new constant pool and update // Attach each method in klass to the new constant pool and update
// to use new constant pool indices as needed: // to use new constant pool indices as needed:
......
...@@ -591,14 +591,11 @@ bool Reflection::is_same_package_member(klassOop class1, klassOop class2, TRAPS) ...@@ -591,14 +591,11 @@ bool Reflection::is_same_package_member(klassOop class1, klassOop class2, TRAPS)
// Caller is responsible for figuring out in advance which case must be true. // Caller is responsible for figuring out in advance which case must be true.
void Reflection::check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner, void Reflection::check_for_inner_class(instanceKlassHandle outer, instanceKlassHandle inner,
bool inner_is_member, TRAPS) { bool inner_is_member, TRAPS) {
const int inner_class_info_index = 0; InnerClassesIterator iter(outer);
const int outer_class_info_index = 1;
typeArrayHandle icls (THREAD, outer->inner_classes());
constantPoolHandle cp (THREAD, outer->constants()); constantPoolHandle cp (THREAD, outer->constants());
for(int i = 0; i < icls->length(); i += 4) { for (; !iter.done(); iter.next()) {
int ioff = icls->ushort_at(i + inner_class_info_index); int ioff = iter.inner_class_info_index();
int ooff = icls->ushort_at(i + outer_class_info_index); int ooff = iter.outer_class_info_index();
if (inner_is_member && ioff != 0 && ooff != 0) { if (inner_is_member && ioff != 0 && ooff != 0) {
klassOop o = cp->klass_at(ooff, CHECK); klassOop o = cp->klass_at(ooff, CHECK);
......
...@@ -26,6 +26,8 @@ ...@@ -26,6 +26,8 @@
# Makefile to run various jdk tests # Makefile to run various jdk tests
# #
GETMIXEDPATH=echo
# Get OS/ARCH specifics # Get OS/ARCH specifics
OSNAME = $(shell uname -s) OSNAME = $(shell uname -s)
ifeq ($(OSNAME), SunOS) ifeq ($(OSNAME), SunOS)
...@@ -60,7 +62,14 @@ ifeq ($(findstring BSD,$(OSNAME)), BSD) ...@@ -60,7 +62,14 @@ ifeq ($(findstring BSD,$(OSNAME)), BSD)
ARCH = i586 ARCH = i586
endif endif
endif endif
ifeq ($(OSNAME), Windows_NT) ifeq ($(PLATFORM),)
# detect wether we're running in MKS or cygwin
ifeq ($(OSNAME), Windows_NT) # MKS
GETMIXEDPATH=dosname -s
endif
ifeq ($(findstring CYGWIN,$(OSNAME)), CYGWIN)
GETMIXEDPATH=cygpath -m -s
endif
PLATFORM = windows PLATFORM = windows
SLASH_JAVA = J: SLASH_JAVA = J:
ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),ia64) ifeq ($(word 1, $(PROCESSOR_IDENTIFIER)),ia64)
...@@ -234,11 +243,11 @@ wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG) ...@@ -234,11 +243,11 @@ wbapitest: prep $(JT_HOME) $(PRODUCT_HOME) $(JTREG)
$(JTREG) -a -v:fail,error \ $(JTREG) -a -v:fail,error \
$(JTREG_KEY_OPTION) \ $(JTREG_KEY_OPTION) \
$(EXTRA_JTREG_OPTIONS) \ $(EXTRA_JTREG_OPTIONS) \
-r:$(ABS_TEST_OUTPUT_DIR)/JTreport \ -r:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTreport \
-w:$(ABS_TEST_OUTPUT_DIR)/JTwork \ -w:$(shell $(GETMIXEDPATH) "$(ABS_TEST_OUTPUT_DIR)")/JTwork \
-jdk:$(PRODUCT_HOME) \ -jdk:$(shell $(GETMIXEDPATH) "$(PRODUCT_HOME)") \
$(JAVA_OPTIONS:%=-vmoption:%) \ $(JAVA_OPTIONS:%=-vmoption:%) \
$(TEST_ROOT)/sanity \ $(shell $(GETMIXEDPATH) "$(TEST_ROOT)")/sanity \
|| $(BUNDLE_UP_FAILED) || $(BUNDLE_UP_FAILED)
$(BUNDLE_UP) $(BUNDLE_UP)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册