Commit 46228038 authored by asaha

Merge

......@@ -402,3 +402,4 @@ b2426da30009cd3069d03de073f351e6432c7682 hs25-b61
ce42d815dd2130250acf6132b51b624001638f0d jdk8-b119
05fedd51e40da22c9460bf17c7185889e435db3d hs25-b62
fca262db9c4309f99d2f5542ab0780e45c2f1578 jdk8-b120
41f4cad94c581034d4c427d2aaabcc20f26342d0 hs25-b63
......@@ -373,7 +373,7 @@ public class ObjectHeap {
visitor.epilogue();
}
private void addLiveRegions(List input, List output) {
private void addLiveRegions(String name, List input, List output) {
for (Iterator itr = input.iterator(); itr.hasNext();) {
MemRegion reg = (MemRegion) itr.next();
Address top = reg.end();
......@@ -386,6 +386,9 @@ public class ObjectHeap {
}
output.add(top);
output.add(bottom);
if (DEBUG) {
System.err.println("Live region: " + name + ": " + bottom + ", " + top);
}
}
}
......@@ -395,7 +398,7 @@ public class ObjectHeap {
}
public void doSpace(Space s) {
addLiveRegions(s.getLiveRegions(), liveRegions);
addLiveRegions(s.toString(), s.getLiveRegions(), liveRegions);
}
private List liveRegions;
}
......@@ -426,11 +429,11 @@ public class ObjectHeap {
ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
PSYoungGen youngGen = psh.youngGen();
// Add eden space
addLiveRegions(youngGen.edenSpace().getLiveRegions(), liveRegions);
addLiveRegions("eden", youngGen.edenSpace().getLiveRegions(), liveRegions);
// Add from-space but not to-space
addLiveRegions(youngGen.fromSpace().getLiveRegions(), liveRegions);
addLiveRegions("from", youngGen.fromSpace().getLiveRegions(), liveRegions);
PSOldGen oldGen = psh.oldGen();
addLiveRegions(oldGen.objectSpace().getLiveRegions(), liveRegions);
addLiveRegions("old ", oldGen.objectSpace().getLiveRegions(), liveRegions);
} else if (heap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) heap;
g1h.heapRegionIterate(lrc);
......@@ -451,7 +454,6 @@ public class ObjectHeap {
if (VM.getVM().getUseTLAB()) {
for (JavaThread thread = VM.getVM().getThreads().first(); thread != null; thread = thread.next()) {
if (thread.isJavaThread()) {
ThreadLocalAllocBuffer tlab = thread.tlab();
if (tlab.start() != null) {
if ((tlab.top() == null) || (tlab.end() == null)) {
......@@ -459,6 +461,12 @@ public class ObjectHeap {
thread.printThreadIDOn(System.err);
System.err.println();
} else {
if (DEBUG) {
System.err.print("TLAB for " + thread.getThreadName() + ", #");
thread.printThreadIDOn(System.err);
System.err.print(": ");
tlab.printOn(System.err);
}
// Go from:
// - below start() to start()
// - start() to top()
......@@ -471,7 +479,6 @@ public class ObjectHeap {
}
}
}
}
// Now sort live regions
sortLiveRegions(liveRegions);
......@@ -480,6 +487,15 @@ public class ObjectHeap {
Assert.that(liveRegions.size() % 2 == 0, "Must have even number of region boundaries");
}
if (DEBUG) {
System.err.println("liveRegions:");
for (int i = 0; i < liveRegions.size(); i += 2) {
Address bottom = (Address) liveRegions.get(i);
Address top = (Address) liveRegions.get(i+1);
System.err.println(" " + bottom + " - " + top);
}
}
return liveRegions;
}
......
......@@ -109,6 +109,6 @@ public class ThreadLocalAllocBuffer extends VMObject {
public void printOn(PrintStream tty) {
tty.println(" [" + start() + "," +
top() + "," + end() + ")");
top() + "," + end() + ",{" + hardEnd() + "})");
}
}
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
HS_BUILD_NUMBER=62
HS_BUILD_NUMBER=63
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -94,13 +94,6 @@ bool frame::safe_for_sender(JavaThread *thread) {
// other generic buffer blobs are more problematic so we just assume they are
// ok. adapter blobs never have a frame complete and are never ok.
// check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
if (!Interpreter::contains(_pc) && _cb->frame_size() <= 0) {
//assert(0, "Invalid frame_size");
return false;
}
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
......@@ -144,6 +137,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
// must be some sort of compiled/runtime frame
// fp does not have to be safe (although it could be check for c1?)
// check for a valid frame_size, otherwise we are unlikely to get a valid sender_pc
if (_cb->frame_size() <= 0) {
return false;
}
sender_sp = _unextended_sp + _cb->frame_size();
// On Intel the return_address is always the word on the stack
sender_pc = (address) *(sender_sp-1);
......
......@@ -418,7 +418,7 @@ void CompiledIC::compute_monomorphic_entry(methodHandle method,
TRAPS) {
nmethod* method_code = method->code();
address entry = NULL;
if (method_code != NULL) {
if (method_code != NULL && method_code->is_in_use()) {
// Call to compiled code
if (static_bound || is_optimized) {
entry = method_code->verified_entry_point();
......@@ -545,7 +545,7 @@ void CompiledStaticCall::set(const StaticCallInfo& info) {
void CompiledStaticCall::compute_entry(methodHandle m, StaticCallInfo& info) {
nmethod* m_code = m->code();
info._callee = m;
if (m_code != NULL) {
if (m_code != NULL && m_code->is_in_use()) {
info._to_interpreter = false;
info._entry = m_code->verified_entry_point();
} else {
......
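Both compute_monomorphic_entry and CompiledStaticCall::compute_entry now share the same guard: the compiled entry point is used only while the callee nmethod is still in_use; otherwise the call is routed through the interpreter. A minimal Java sketch of that selection rule (the NMethod type is a hypothetical stand-in, not HotSpot code):

```java
// Model of the guard added above: prefer the compiled entry only when an
// nmethod exists and is still in_use; fall back to the interpreter otherwise.
public class EntrySelectionDemo {
    record NMethod(boolean inUse, long verifiedEntryPoint) {}

    static String computeEntry(NMethod code) {
        if (code != null && code.inUse()) {
            return "compiled entry @ 0x" + Long.toHexString(code.verifiedEntryPoint());
        }
        return "interpreter entry (via c2i stub)";
    }

    public static void main(String[] args) {
        System.out.println(computeEntry(new NMethod(true, 0x7f00L)));  // compiled
        System.out.println(computeEntry(new NMethod(false, 0x7f00L))); // interpreter
        System.out.println(computeEntry(null));                        // interpreter
    }
}
```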
......@@ -459,7 +459,7 @@ const char* nmethod::compile_kind() const {
// Fill in default values for various flag fields
void nmethod::init_defaults() {
_state = alive;
_state = in_use;
_marked_for_reclamation = 0;
_has_flushed_dependencies = 0;
_has_unsafe_access = 0;
......@@ -2393,8 +2393,8 @@ void nmethod::verify() {
void nmethod::verify_interrupt_point(address call_site) {
// Verify IC only when nmethod installation is finished.
bool is_installed = (method()->code() == this) // nmethod is in state 'alive' and installed
|| !this->is_in_use(); // nmethod is installed, but not in 'alive' state
bool is_installed = (method()->code() == this) // nmethod is in state 'in_use' and installed
|| !this->is_in_use(); // nmethod is installed, but not in 'in_use' state
if (is_installed) {
Thread *cur = Thread::current();
if (CompiledIC_lock->owner() == cur ||
......
......@@ -184,11 +184,12 @@ class nmethod : public CodeBlob {
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
enum { alive = 0,
not_entrant = 1, // uncommon trap has happened but activations may still exist
zombie = 2,
unloaded = 3 };
enum { in_use = 0, // executable nmethod
not_entrant = 1, // marked for deoptimization but activations may still exist,
// will be transformed to zombie when all activations are gone
zombie = 2, // no activations exist, nmethod is ready for purge
unloaded = 3 }; // there should be no activations, should not be called,
// will be transformed to zombie immediately
jbyte _scavenge_root_state;
......@@ -407,8 +408,8 @@ class nmethod : public CodeBlob {
address verified_entry_point() const { return _verified_entry_point; } // if klass is correct
// flag accessing and manipulation
bool is_in_use() const { return _state == alive; }
bool is_alive() const { return _state == alive || _state == not_entrant; }
bool is_in_use() const { return _state == in_use; }
bool is_alive() const { return _state == in_use || _state == not_entrant; }
bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; }
......
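With the rename from alive to in_use, the predicates read consistently: is_in_use() holds only in the first state, while is_alive() also covers not_entrant. A small Java sketch of the same state model (names mirror the enum above; this is an illustration, not the VM's implementation):

```java
// nmethod life-cycle states and the predicates they back.
enum NMethodState {
    IN_USE,      // executable nmethod
    NOT_ENTRANT, // marked for deoptimization, activations may still exist
    ZOMBIE,      // no activations exist, ready for purge
    UNLOADED;    // should not be called, transformed to zombie immediately

    boolean isInUse() { return this == IN_USE; }
    boolean isAlive() { return this == IN_USE || this == NOT_ENTRANT; }
}
```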
......@@ -27,6 +27,7 @@
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1HotCardCache.hpp"
#include "runtime/java.hpp"
ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
_threads(NULL), _n_threads(0),
......@@ -62,6 +63,10 @@ ConcurrentG1Refine::ConcurrentG1Refine(G1CollectedHeap* g1h) :
for (int i = _n_threads - 1; i >= 0; i--) {
ConcurrentG1RefineThread* t = new ConcurrentG1RefineThread(this, next, worker_id_offset, i);
assert(t != NULL, "Conc refine should have been created");
if (t->osthread() == NULL) {
vm_shutdown_during_initialization("Could not create ConcurrentG1RefineThread");
}
assert(t->cg1r() == this, "Conc refine thread should refer to this");
_threads[i] = t;
next = t;
......
......@@ -553,6 +553,9 @@ ConcurrentMark::ConcurrentMark(G1CollectedHeap* g1h, ReservedSpace heap_rs) :
_cmThread = new ConcurrentMarkThread(this);
assert(cmThread() != NULL, "CM Thread should have been created");
assert(cmThread()->cm() != NULL, "CM Thread should refer to this cm");
if (_cmThread->osthread() == NULL) {
vm_shutdown_during_initialization("Could not create ConcurrentMarkThread");
}
assert(CGC_lock != NULL, "Where's the CGC_lock?");
assert(_markBitMap1.covers(heap_rs), "_markBitMap1 inconsistency");
......
......@@ -2433,20 +2433,6 @@ void PSParallelCompact::marking_phase(ParCompactionManager* cm,
_gc_tracer.report_object_count_after_gc(is_alive_closure());
}
void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
ClassLoaderData* cld = klass->class_loader_data();
// The actual processing of the klass is done when we
// traverse the list of Klasses in the class loader data.
PSParallelCompact::follow_class_loader(cm, cld);
}
void PSParallelCompact::adjust_klass(ParCompactionManager* cm, Klass* klass) {
ClassLoaderData* cld = klass->class_loader_data();
// The actual processing of the klass is done when we
// traverse the list of Klasses in the class loader data.
PSParallelCompact::adjust_class_loader(cm, cld);
}
void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
ClassLoaderData* cld) {
PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
......@@ -2455,13 +2441,6 @@ void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
}
void PSParallelCompact::adjust_class_loader(ParCompactionManager* cm,
ClassLoaderData* cld) {
cld->oops_do(PSParallelCompact::adjust_pointer_closure(),
PSParallelCompact::adjust_klass_closure(),
true);
}
// This should be moved to the shared markSweep code!
class PSAlwaysTrueClosure: public BoolObjectClosure {
public:
......
......@@ -1200,13 +1200,10 @@ class PSParallelCompact : AllStatic {
T* p);
template <class T> static inline void adjust_pointer(T* p);
static void follow_klass(ParCompactionManager* cm, Klass* klass);
static void adjust_klass(ParCompactionManager* cm, Klass* klass);
static inline void follow_klass(ParCompactionManager* cm, Klass* klass);
static void follow_class_loader(ParCompactionManager* cm,
ClassLoaderData* klass);
static void adjust_class_loader(ParCompactionManager* cm,
ClassLoaderData* klass);
// Compaction support.
// Return true if p is in the range [beg_addr, end_addr).
......@@ -1380,6 +1377,11 @@ inline void PSParallelCompact::adjust_pointer(T* p) {
}
}
inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
oop holder = klass->klass_holder();
PSParallelCompact::mark_and_push(cm, &holder);
}
template <class T>
inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
mark_and_push(_compaction_manager, p);
......
......@@ -242,8 +242,20 @@ void LinkResolver::resolve_klass(KlassHandle& result, constantPoolHandle pool, i
// Look up method in klasses, including static methods
// Then look up local default methods
void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, TRAPS) {
void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, bool in_imethod_resolve, TRAPS) {
Method* result_oop = klass->uncached_lookup_method(name, signature);
// JDK 8, JVMS 5.4.3.4: Interface method resolution should
// ignore static and non-public methods of java.lang.Object,
// like clone, finalize, registerNatives.
if (in_imethod_resolve &&
result_oop != NULL &&
klass->is_interface() &&
(result_oop->is_static() || !result_oop->is_public()) &&
result_oop->method_holder() == SystemDictionary::Object_klass()) {
result_oop = NULL;
}
if (result_oop == NULL) {
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
if (default_methods != NULL) {
......@@ -420,28 +432,18 @@ void LinkResolver::check_method_accessability(KlassHandle ref_klass,
AccessFlags flags = sel_method->access_flags();
// Special case #1: arrays always override "clone". JVMS 2.15.
// Special case: arrays always override "clone". JVMS 2.15.
// If the resolved klass is an array class, and the declaring class
// is java.lang.Object and the method is "clone", set the flags
// to public.
// Special case #2: If the resolved klass is an interface, and
// the declaring class is java.lang.Object, and the method is
// "clone" or "finalize", set the flags to public. If the
// resolved interface does not contain "clone" or "finalize"
// methods, the method/interface method resolution looks to
// the interface's super class, java.lang.Object. With JDK 8
// interface accessability check requirement, special casing
// this scenario is necessary to avoid an IAE.
//
// We'll check for each method name first and then java.lang.Object
// to best short-circuit out of these tests.
if (((sel_method->name() == vmSymbols::clone_name() &&
(resolved_klass->oop_is_array() || resolved_klass->is_interface())) ||
(sel_method->name() == vmSymbols::finalize_method_name() &&
resolved_klass->is_interface())) &&
sel_klass() == SystemDictionary::Object_klass()) {
// We'll check for the method name first, as that's most likely
// to be false (so we'll short-circuit out of these tests).
if (sel_method->name() == vmSymbols::clone_name() &&
sel_klass() == SystemDictionary::Object_klass() &&
resolved_klass->oop_is_array()) {
// We need to change "protected" to "public".
assert(flags.is_protected(), "clone or finalize not protected?");
assert(flags.is_protected(), "clone not protected?");
jint new_flags = flags.as_int();
new_flags = new_flags & (~JVM_ACC_PROTECTED);
new_flags = new_flags | JVM_ACC_PUBLIC;
......@@ -531,7 +533,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
}
// 2. lookup method in resolved klass and its super klasses
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, true, CHECK);
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, true, false, CHECK);
if (resolved_method.is_null()) { // not found in the class hierarchy
// 3. lookup method in all the interfaces implemented by the resolved klass
......@@ -628,7 +630,7 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
// lookup method in this interface or its super, java.lang.Object
// JDK8: also look for static methods
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, false, CHECK);
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, false, true, CHECK);
if (resolved_method.is_null()) {
// lookup method in all the super-interfaces
......@@ -943,8 +945,17 @@ void LinkResolver::linktime_resolve_special_method(methodHandle& resolved_method
Klass *klass_to_check = !InstanceKlass::cast(current_klass())->is_anonymous() ?
current_klass() :
InstanceKlass::cast(current_klass())->host_klass();
if (!InstanceKlass::cast(klass_to_check)->is_same_or_direct_interface(resolved_klass())) {
// As of the fix for 4486457 we disable verification for all of the
// dynamically-generated bytecodes associated with the 1.4
// reflection implementation, not just those associated with
// sun/reflect/SerializationConstructorAccessor.
bool is_reflect = JDK_Version::is_gte_jdk14x_version() &&
UseNewReflection &&
klass_to_check->is_subclass_of(
SystemDictionary::reflect_MagicAccessorImpl_klass());
if (!is_reflect &&
!InstanceKlass::cast(klass_to_check)->is_same_or_direct_interface(resolved_klass())) {
ResourceMark rm(THREAD);
char buf[200];
jio_snprintf(buf, sizeof(buf),
......
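The lookup filter added above implements JVMS 5.4.3.4: interface method resolution must not surface java.lang.Object's static or non-public methods (clone, finalize, registerNatives), so resolution falls through to the interface hierarchy instead. A minimal Java illustration of the observable behavior (class names are hypothetical; the RegisterNatives test updated at the end of this change exercises the same path):

```java
// Before this change, resolving registerNatives through the interface could
// find java.lang.Object's private static registerNatives and fail with
// IllegalAccessError; with the filter, B's public method is selected.
interface I {
    void registerNatives(); // same name as a private static method on Object
}

class B implements I {
    public void registerNatives() { System.out.println("B.registerNatives"); }
}

public class InterfaceResolutionDemo {
    public static void main(String[] args) {
        I val = new B();
        val.registerNatives(); // invokeinterface; resolves to B's method
    }
}
```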
......@@ -124,7 +124,7 @@ class LinkResolver: AllStatic {
friend class klassItable;
private:
static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, TRAPS);
static void lookup_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, bool checkpolymorphism, bool in_imethod_resolve, TRAPS);
static void lookup_instance_method_in_klasses (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_method_in_interfaces (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
static void lookup_polymorphic_method (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
......
......@@ -287,7 +287,7 @@ class VirtualSpaceNode : public CHeapObj<mtClass> {
VirtualSpace* virtual_space() const { return (VirtualSpace*) &_virtual_space; }
// Returns true if "word_size" is available in the VirtualSpace
bool is_available(size_t word_size) { return _top + word_size <= end(); }
bool is_available(size_t word_size) { return word_size <= pointer_delta(end(), _top, sizeof(MetaWord)); }
MetaWord* top() const { return _top; }
void inc_top(size_t word_size) { _top += word_size; }
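The rewritten is_available avoids address overflow: _top + word_size can wrap around when word_size is near the address-space limit (the new test_is_available_overflow below constructs exactly that case), while pointer_delta(end(), _top, sizeof(MetaWord)) computes the remaining capacity first. A Java sketch of the two idioms, modeling addresses as unsigned 64-bit longs (helper names are hypothetical):

```java
// Overflow-prone vs. overflow-safe capacity checks.
public class CapacityCheckDemo {
    static boolean naive(long top, long end, long size) {
        // top + size can wrap past 2^64 and then compare below end spuriously
        return Long.compareUnsigned(top + size, end) <= 0;
    }

    static boolean safe(long top, long end, long size) {
        // end - top cannot overflow because end >= top is an invariant
        return Long.compareUnsigned(size, end - top) <= 0;
    }

    public static void main(String[] args) {
        long top = 0xFFFF_FFFF_FFFF_F000L, end = 0xFFFF_FFFF_FFFF_F800L;
        long huge = 0x2000L; // pushes top past 2^64
        System.out.println(naive(top, end, huge)); // true  (wrong)
        System.out.println(safe(top, end, huge));  // false (correct)
    }
}
```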
......@@ -3641,10 +3641,82 @@ class TestVirtualSpaceNodeTest {
}
}
#define assert_is_available_positive(word_size) \
assert(vsn.is_available(word_size), \
err_msg(#word_size ": " PTR_FORMAT " bytes were not available in " \
"VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
(uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
#define assert_is_available_negative(word_size) \
assert(!vsn.is_available(word_size), \
err_msg(#word_size ": " PTR_FORMAT " bytes should not be available in " \
"VirtualSpaceNode [" PTR_FORMAT ", " PTR_FORMAT ")", \
(uintptr_t)(word_size * BytesPerWord), vsn.bottom(), vsn.end()));
static void test_is_available_positive() {
// Reserve some memory.
VirtualSpaceNode vsn(os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
assert(expanded, "Failed to commit");
// Check that is_available accepts the committed size.
assert_is_available_positive(commit_word_size);
// Check that is_available accepts half the committed size.
size_t expand_word_size = commit_word_size / 2;
assert_is_available_positive(expand_word_size);
}
static void test_is_available_negative() {
// Reserve some memory.
VirtualSpaceNode vsn(os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
assert(expanded, "Failed to commit");
// Check that is_available doesn't accept a too large size.
size_t two_times_commit_word_size = commit_word_size * 2;
assert_is_available_negative(two_times_commit_word_size);
}
static void test_is_available_overflow() {
// Reserve some memory.
VirtualSpaceNode vsn(os::vm_allocation_granularity());
assert(vsn.initialize(), "Failed to setup VirtualSpaceNode");
// Commit some memory.
size_t commit_word_size = os::vm_allocation_granularity() / BytesPerWord;
bool expanded = vsn.expand_by(commit_word_size, commit_word_size);
assert(expanded, "Failed to commit");
// Calculate a size that will overflow the virtual space size.
void* virtual_space_max = (void*)(uintptr_t)-1;
size_t bottom_to_max = pointer_delta(virtual_space_max, vsn.bottom(), 1);
size_t overflow_size = bottom_to_max + BytesPerWord;
size_t overflow_word_size = overflow_size / BytesPerWord;
// Check that is_available can handle the overflow.
assert_is_available_negative(overflow_word_size);
}
static void test_is_available() {
TestVirtualSpaceNodeTest::test_is_available_positive();
TestVirtualSpaceNodeTest::test_is_available_negative();
TestVirtualSpaceNodeTest::test_is_available_overflow();
}
};
void TestVirtualSpaceNode_test() {
TestVirtualSpaceNodeTest::test();
TestVirtualSpaceNodeTest::test_is_available();
}
#endif
......@@ -150,10 +150,6 @@ void InstanceClassLoaderKlass::oop_push_contents(PSPromotionManager* pm, oop obj
int InstanceClassLoaderKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
InstanceKlass::oop_update_pointers(cm, obj);
ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
if (loader_data != NULL) {
PSParallelCompact::adjust_class_loader(cm, loader_data);
}
return size_helper();
}
#endif // INCLUDE_ALL_GCS
......
......@@ -2199,7 +2199,6 @@ int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
obj, \
PSParallelCompact::adjust_pointer(p), \
assert_is_in)
obj->update_header(cm);
return size;
}
......
......@@ -155,8 +155,13 @@ void InstanceMirrorKlass::oop_follow_contents(oop obj) {
// Follow the klass field in the mirror.
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
// For anonymous classes we need to handle the class loader data,
// otherwise it won't be claimed and can be unloaded.
// An anonymous class doesn't have its own class loader, so the call
// to follow_klass will mark and push its java mirror instead of the
// class loader. When handling the java mirror for an anonymous class,
// we need to make sure its class loader data is claimed; this is done
// by calling follow_class_loader explicitly. For non-anonymous classes
// the call to follow_class_loader is made when the class loader itself
// is handled.
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
MarkSweep::follow_class_loader(klass->class_loader_data());
} else {
......@@ -183,7 +188,18 @@ void InstanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm,
// Follow the klass field in the mirror.
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
// An anonymous class doesn't have its own class loader, so the call
// to follow_klass will mark and push its java mirror instead of the
// class loader. When handling the java mirror for an anonymous class,
// we need to make sure its class loader data is claimed; this is done
// by calling follow_class_loader explicitly. For non-anonymous classes
// the call to follow_class_loader is made when the class loader itself
// is handled.
if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
} else {
PSParallelCompact::follow_klass(cm, klass);
}
} else {
// If klass is NULL then this a mirror for a primitive type.
// We don't have to follow them, since they are handled as strong
......@@ -332,17 +348,6 @@ int InstanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj)
int size = oop_size(obj);
InstanceKlass::oop_update_pointers(cm, obj);
// Follow the klass field in the mirror.
Klass* klass = java_lang_Class::as_Klass(obj);
if (klass != NULL) {
PSParallelCompact::adjust_klass(cm, klass);
} else {
// If klass is NULL then this a mirror for a primitive type.
// We don't have to follow them, since they are handled as strong
// roots in Universe::oops_do.
assert(java_lang_Class::is_primitive(obj), "Sanity check");
}
InstanceMirrorKlass_OOP_ITERATE( \
start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj),\
PSParallelCompact::adjust_pointer(p), \
......
......@@ -587,7 +587,6 @@ int ObjArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
assert (obj->is_objArray(), "obj must be obj array");
objArrayOop a = objArrayOop(obj);
int size = a->object_size();
a->update_header(cm);
ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
return size;
}
......
......@@ -328,11 +328,6 @@ class oopDesc {
// return the size of this oop. This is used by the MarkSweep collector.
int adjust_pointers();
#if INCLUDE_ALL_GCS
// Parallel old
void update_header(ParCompactionManager* cm);
#endif // INCLUDE_ALL_GCS
// mark-sweep support
void follow_body(int begin, int end);
......
......@@ -80,8 +80,4 @@ inline oop oopDesc::forward_to_atomic(oop p) {
return forwardee();
}
inline void oopDesc::update_header(ParCompactionManager* cm) {
PSParallelCompact::adjust_klass(cm, klass());
}
#endif // SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
......@@ -2071,6 +2071,11 @@ const Type *LoadNode::klass_value_common( PhaseTransform *phase ) const {
if (t != NULL) {
// constant oop => constant klass
if (offset == java_lang_Class::array_klass_offset_in_bytes()) {
if (t->is_void()) {
// We cannot create a void array. Since void is a primitive type, return the
// null klass. Users of this result need to do a null check on the returned klass.
return TypePtr::NULL_PTR;
}
return TypeKlassPtr::make(ciArrayKlass::make(t));
}
if (!t->is_klass()) {
......
......@@ -537,15 +537,26 @@ bool Reflection::verify_field_access(Klass* current_class,
return true;
}
Klass* host_class = current_class;
while (host_class->oop_is_instance() &&
InstanceKlass::cast(host_class)->is_anonymous()) {
Klass* next_host_class = InstanceKlass::cast(host_class)->host_klass();
if (next_host_class == NULL) break;
host_class = next_host_class;
}
if (host_class == field_class) {
return true;
}
if (access.is_protected()) {
if (!protected_restriction) {
// See if current_class is a subclass of field_class
if (current_class->is_subclass_of(field_class)) {
// See if current_class (or outermost host class) is a subclass of field_class
if (host_class->is_subclass_of(field_class)) {
if (access.is_static() || // static fields are ok, see 6622385
current_class == resolved_class ||
field_class == resolved_class ||
current_class->is_subclass_of(resolved_class) ||
resolved_class->is_subclass_of(current_class)) {
host_class->is_subclass_of(resolved_class) ||
resolved_class->is_subclass_of(host_class)) {
return true;
}
}
......
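The new loop climbs from a VM-anonymous class to its outermost host class before running the subclass-based protected-access checks, since an anonymous class has no standing of its own. A self-contained Java sketch of the walk (the Klass type is a stand-in and the oop_is_instance check is elided):

```java
// Climb the host_klass chain to the outermost non-anonymous class.
public class HostClassDemo {
    static final class Klass {
        final boolean anonymous;
        final Klass host; // may be null
        Klass(boolean anonymous, Klass host) { this.anonymous = anonymous; this.host = host; }
    }

    static Klass outermostHost(Klass k) {
        while (k.anonymous) {
            Klass next = k.host;
            if (next == null) break;
            k = next;
        }
        return k;
    }

    public static void main(String[] args) {
        Klass host = new Klass(false, null);
        Klass anon = new Klass(true, host);
        System.out.println(outermostHost(anon) == host); // true
    }
}
```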
......@@ -1178,12 +1178,12 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
CodeBlob* caller_cb = caller_frame.cb();
guarantee(caller_cb != NULL && caller_cb->is_nmethod(), "must be called from nmethod");
nmethod* caller_nm = caller_cb->as_nmethod_or_null();
// make sure caller is not getting deoptimized
// and removed before we are done with it.
// CLEANUP - with lazy deopt shouldn't need this lock
nmethodLocker caller_lock(caller_nm);
// determine call info & receiver
// note: a) receiver is NULL for static calls
// b) an exception is thrown if receiver is NULL for non-static calls
......@@ -1198,6 +1198,11 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
(!is_virtual && invoke_code == Bytecodes::_invokedynamic) ||
( is_virtual && invoke_code != Bytecodes::_invokestatic ), "inconsistent bytecode");
// We do not patch the call site if the caller nmethod has been made non-entrant.
if (!caller_nm->is_in_use()) {
return callee_method;
}
#ifndef PRODUCT
// tracing/debugging/statistics
int *addr = (is_optimized) ? (&_resolve_opt_virtual_ctr) :
......@@ -1237,6 +1242,10 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
// Make sure the callee nmethod does not get deoptimized and removed before
// we are done patching the code.
nmethod* callee_nm = callee_method->code();
if (callee_nm != NULL && !callee_nm->is_in_use()) {
// Patch call site to C2I adapter if callee nmethod is deoptimized or unloaded.
callee_nm = NULL;
}
nmethodLocker nl_callee(callee_nm);
#ifdef ASSERT
address dest_entry_point = callee_nm == NULL ? 0 : callee_nm->entry_point(); // used below
......@@ -1258,15 +1267,24 @@ methodHandle SharedRuntime::resolve_sub_helper(JavaThread *thread,
{
MutexLocker ml_patch(CompiledIC_lock);
// Lock blocks for safepoint during which both nmethods can change state.
// Now that we are ready to patch if the Method* was redefined then
// don't update call site and let the caller retry.
if (!callee_method->is_old()) {
// Don't update call site if caller nmethod has been made non-entrant,
// as it is a waste of time.
// Don't update call site if callee nmethod was unloaded or deoptimized.
// Don't update call site if callee nmethod was replaced by another nmethod,
// which may happen once multiple alive nmethods (tiered compilation)
// are supported.
if (!callee_method->is_old() && caller_nm->is_in_use() &&
(callee_nm == NULL || callee_nm->is_in_use() && (callee_method->code() == callee_nm))) {
#ifdef ASSERT
// We must not try to patch to jump to an already unloaded method.
if (dest_entry_point != 0) {
assert(CodeCache::find_blob(dest_entry_point) != NULL,
"should not unload nmethod while locked");
CodeBlob* cb = CodeCache::find_blob(dest_entry_point);
assert((cb != NULL) && cb->is_nmethod() && (((nmethod*)cb) == callee_nm),
"should not call unloaded nmethod");
}
#endif
if (is_virtual) {
......
......@@ -174,6 +174,7 @@ typedef uint64_t julong;
inline jint jint_cast (jfloat x) { return *(jint* )&x; }
inline jlong jlong_cast (jdouble x) { return *(jlong* )&x; }
inline julong julong_cast (jdouble x) { return *(julong* )&x; }
inline jfloat jfloat_cast (jint x) { return *(jfloat* )&x; }
inline jdouble jdouble_cast(jlong x) { return *(jdouble*)&x; }
......
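The added jfloat_cast completes the family of bit-reinterpreting casts, mirroring the existing jint_cast in the opposite direction. Java exposes the same operation through Float.floatToRawIntBits and Float.intBitsToFloat:

```java
// Round trip between a float value and its IEEE-754 bit pattern, the Java
// analogue of jint_cast / jfloat_cast above.
public class BitCastDemo {
    public static void main(String[] args) {
        float f = 1.5f;
        int bits = Float.floatToRawIntBits(f);   // jint_cast direction
        float back = Float.intBitsToFloat(bits); // jfloat_cast direction
        System.out.println(Integer.toHexString(bits)); // 3fc00000
        System.out.println(back);                      // 1.5
    }
}
```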
......@@ -46,13 +46,17 @@ public class SpreadNullArg {
mh_spread_target =
MethodHandles.lookup().findStatic(SpreadNullArg.class, "target_spread_arg", mt_ref_arg);
result = (int) mh_spreadInvoker.invokeExact(mh_spread_target, (Object[]) null);
} catch(NullPointerException e) {
// Expected exception - do nothing!
} catch(Throwable e) {
throw new Error("Expected IllegalArgumentException was not thrown");
} catch (IllegalArgumentException e) {
System.out.println("Expected exception : " + e);
} catch (Throwable e) {
throw new Error(e);
}
if (result != 42) throw new Error("Expected NullPointerException was not thrown");
if (result != 42) {
throw new Error("result [" + result
+ "] != 42 : Expected IllegalArgumentException was not thrown?");
}
}
public static int target_spread_arg(Integer i1) {
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8029366
* @summary ShouldNotReachHere error when creating an array with component type of void
*/
public class ArrayNewInstanceOfVoid {
public static void main(String[] args) {
for (int i = 0; i < 100_000; i++) {
test();
}
}
private static void test() {
try {
java.lang.reflect.Array.newInstance(void.class, 2);
} catch (IllegalArgumentException e) {
// expected
}
}
}
......@@ -34,8 +34,9 @@
*/
import java.util.concurrent.atomic.*;
class C1ObjectSpillInLogicOp {
static public void main(String[] args) {
public class C1ObjectSpillInLogicOp {
public static void main(String[] args) {
AtomicReferenceArray<Integer> x = new AtomicReferenceArray(128);
Integer y = new Integer(0);
for (int i = 0; i < 50000; i++) {
......
......@@ -22,10 +22,10 @@
*/
/*
* @ignore 8028741
* @test
* @bug 8024804
* @summary registerNatives() interface resolution should receive IAE
* @bug 8028741
* @summary interface method resolution should skip finding j.l.Object's registerNatives() and succeed in selecting class B's registerNatives()
* @run main RegisterNatives
*/
public class RegisterNatives {
......@@ -38,10 +38,10 @@ public class RegisterNatives {
try {
val.registerNatives();
} catch (IllegalAccessError e) {
System.out.println("TEST PASSES - according to current JVM spec, IAE expected\n");
return;
System.out.println("TEST FAILS - JDK 8 JVMS, static and non-public methods of j.l.Object should be ignored during interface method resolution\n");
e.printStackTrace();
throw e;
}
System.out.println("TEST FAILS - no IAE resulted\n");
System.exit(1);
System.out.println("TEST PASSES - no IAE resulted\n");
}
}