Commit f8c2e8fd authored by jrose

Merge

@@ -242,10 +242,10 @@ void Compilation::setup_code_buffer(CodeBuffer* code, int call_stub_estimate) {
   code->insts()->initialize_shared_locs((relocInfo*)locs_buffer,
                                         locs_buffer_size / sizeof(relocInfo));
   code->initialize_consts_size(Compilation::desired_max_constant_size());
-  // Call stubs + deopt/exception handler
+  // Call stubs + two deopt handlers (regular and MH) + exception handler
   code->initialize_stubs_size((call_stub_estimate * LIR_Assembler::call_stub_size) +
                               LIR_Assembler::exception_handler_size +
-                              LIR_Assembler::deopt_handler_size);
+                              2 * LIR_Assembler::deopt_handler_size);
 }
......
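Reviewer note: the new stub budget is plain linear arithmetic over per-platform constants, with the second deopt handler being the method-handle (MH) one. A minimal sketch with hypothetical sizes (the real values are platform-specific constants on LIR_Assembler; the numbers below are assumptions for illustration only):

// Hypothetical stand-ins for the per-platform LIR_Assembler constants.
constexpr int call_stub_size         = 28;   // assumed
constexpr int exception_handler_size = 175;  // assumed
constexpr int deopt_handler_size     = 17;   // assumed

constexpr int stubs_size(int call_stub_estimate) {
  // Mirrors the new sizing: call stubs + exception handler
  // + two deopt handlers (regular and MH).
  return call_stub_estimate * call_stub_size +
         exception_handler_size +
         2 * deopt_handler_size;
}

static_assert(stubs_size(10) == 10 * 28 + 175 + 2 * 17, "stub budget arithmetic");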
@@ -584,6 +584,7 @@ nmethod::nmethod(
     _oops_do_mark_link       = NULL;
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
+    _jmethod_id              = NULL;
     _osr_link                = NULL;
     _scavenge_root_link      = NULL;
     _scavenge_root_state     = 0;
@@ -677,6 +678,7 @@ nmethod::nmethod(
     _oops_do_mark_link       = NULL;
     _method                  = method;
     _entry_bci               = InvocationEntryBci;
+    _jmethod_id              = NULL;
     _osr_link                = NULL;
     _scavenge_root_link      = NULL;
     _scavenge_root_state     = 0;
@@ -784,6 +786,7 @@ nmethod::nmethod(
     NOT_PRODUCT(_has_debug_info = false);
     _oops_do_mark_link       = NULL;
     _method                  = method;
+    _jmethod_id              = NULL;
     _compile_id              = compile_id;
     _comp_level              = comp_level;
     _entry_bci               = entry_bci;
@@ -1488,11 +1491,25 @@ void nmethod::post_compiled_method_load_event() {
                       moop->signature()->utf8_length(),
                       code_begin(), code_size());

+  if (JvmtiExport::should_post_compiled_method_load() ||
+      JvmtiExport::should_post_compiled_method_unload()) {
+    get_and_cache_jmethod_id();
+  }
+
   if (JvmtiExport::should_post_compiled_method_load()) {
     JvmtiExport::post_compiled_method_load(this);
   }
 }

+jmethodID nmethod::get_and_cache_jmethod_id() {
+  if (_jmethod_id == NULL) {
+    // Cache the jmethod_id since it can no longer be looked up once the
+    // method itself has been marked for unloading.
+    _jmethod_id = method()->jmethod_id();
+  }
+  return _jmethod_id;
+}
+
 void nmethod::post_compiled_method_unload() {
   if (unload_reported()) {
     // During unloading we transition to unloaded and then to zombie
@@ -1504,12 +1521,17 @@ void nmethod::post_compiled_method_unload() {
   DTRACE_METHOD_UNLOAD_PROBE(method());

   // If a JVMTI agent has enabled the CompiledMethodUnload event then
-  // post the event. Sometime later this nmethod will be made a zombie by
-  // the sweeper but the methodOop will not be valid at that point.
-  if (JvmtiExport::should_post_compiled_method_unload()) {
+  // post the event. Sometime later this nmethod will be made a zombie
+  // by the sweeper but the methodOop will not be valid at that point.
+  // If the _jmethod_id is null then no load event was ever requested
+  // so don't bother posting the unload. The main reason for this is
+  // that the jmethodID is a weak reference to the methodOop so if
+  // it's being unloaded there's no way to look it up since the weak
+  // ref will have been cleared.
+  if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
     assert(!unload_reported(), "already unloaded");
     HandleMark hm;
-    JvmtiExport::post_compiled_method_unload(method()->jmethod_id(), code_begin());
+    JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin());
   }

   // The JVMTI CompiledMethodUnload event can be enabled or disabled at
......
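Reviewer note: the caching above matters because a jmethodID is only a weak handle; by the time the unload event fires, the methodOop behind it can no longer be resolved. A minimal sketch of the consuming side, assuming a standard JVMTI agent (the function names are ours; the JVMTI calls and the single capability that gates both the load and unload events follow the spec):

#include <string.h>
#include <jvmti.h>

static void JNICALL
compiled_method_unload(jvmtiEnv* jvmti, jmethodID method, const void* code_addr) {
  // 'method' is only an ID here: the methodOop behind it may already be
  // gone, which is exactly why the VM caches the jmethodID up front.
}

JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm, char* options, void* reserved) {
  jvmtiEnv* jvmti = NULL;
  if (vm->GetEnv((void**)&jvmti, JVMTI_VERSION_1_0) != JNI_OK) return JNI_ERR;

  jvmtiCapabilities caps;
  memset(&caps, 0, sizeof(caps));
  caps.can_generate_compiled_method_load_events = 1;  // gates load and unload
  jvmti->AddCapabilities(&caps);

  jvmtiEventCallbacks callbacks;
  memset(&callbacks, 0, sizeof(callbacks));
  callbacks.CompiledMethodUnload = compiled_method_unload;
  jvmti->SetEventCallbacks(&callbacks, sizeof(callbacks));
  jvmti->SetEventNotificationMode(JVMTI_ENABLE,
                                  JVMTI_EVENT_COMPILED_METHOD_UNLOAD, NULL);
  return JNI_OK;
}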
@@ -135,6 +135,7 @@ class nmethod : public CodeBlob {
   methodOop _method;
   int       _entry_bci;   // != InvocationEntryBci if this nmethod is an on-stack replacement method
+  jmethodID _jmethod_id;  // Cache of method()->jmethod_id()

   // To support simple linked-list chaining of nmethods:
   nmethod*  _osr_link;    // from instanceKlass::osr_nmethods_head
@@ -599,6 +600,7 @@ public:

   // jvmti support:
   void post_compiled_method_load_event();
+  jmethodID get_and_cache_jmethod_id();

   // verify operations
   void verify();
......
@@ -217,21 +217,21 @@ jvmtiError JvmtiCodeBlobEvents::generate_dynamic_code_events(JvmtiEnv* env) {
 class nmethodDesc: public CHeapObj {
  private:
-  methodHandle _method;
+  jmethodID _jmethod_id;
   address _code_begin;
   address _code_end;
   jvmtiAddrLocationMap* _map;
   jint _map_length;
  public:
-  nmethodDesc(methodHandle method, address code_begin, address code_end,
+  nmethodDesc(jmethodID jmethod_id, address code_begin, address code_end,
               jvmtiAddrLocationMap* map, jint map_length) {
-    _method = method;
+    _jmethod_id = jmethod_id;
     _code_begin = code_begin;
     _code_end = code_end;
     _map = map;
     _map_length = map_length;
   }
-  methodHandle method() const { return _method; }
+  jmethodID jmethod_id() const { return _jmethod_id; }
   address code_begin() const { return _code_begin; }
   address code_end() const { return _code_end; }
   jvmtiAddrLocationMap* map() const { return _map; }
@@ -323,8 +323,7 @@ void nmethodCollector::do_nmethod(nmethod* nm) {
   JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);

   // record the nmethod details
-  methodHandle mh(nm->method());
-  nmethodDesc* snm = new nmethodDesc(mh,
+  nmethodDesc* snm = new nmethodDesc(nm->get_and_cache_jmethod_id(),
                                      nm->code_begin(),
                                      nm->code_end(),
                                      map,
@@ -367,8 +366,7 @@ jvmtiError JvmtiCodeBlobEvents::generate_compiled_method_load_events(JvmtiEnv* env) {
   // iterate over the list and post an event for each nmethod
   nmethodDesc* nm_desc = collector.first();
   while (nm_desc != NULL) {
-    methodOop method = nm_desc->method()();
-    jmethodID mid = method->jmethod_id();
+    jmethodID mid = nm_desc->jmethod_id();
     assert(mid != NULL, "checking");
     JvmtiExport::post_compiled_method_load(env, mid,
                                            (jint)(nm_desc->code_end() - nm_desc->code_begin()),
......
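Reviewer note: nmethodDesc and the collector above exist to service JVMTI GenerateEvents, which replays CompiledMethodLoad for methods compiled before an agent attached; storing the jmethodID instead of a methodHandle keeps that replay safe against unloading. A one-call sketch of the agent side (the helper name is ours, the JVMTI call is standard):

#include <jvmti.h>

// Replays CompiledMethodLoad for already-compiled methods. This is the
// JVMTI entry point that drives generate_compiled_method_load_events above.
static jvmtiError replay_compiled_method_loads(jvmtiEnv* jvmti) {
  return jvmti->GenerateEvents(JVMTI_EVENT_COMPILED_METHOD_LOAD);
}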
@@ -66,6 +66,7 @@ jobject JNIHandles::make_local(JNIEnv* env, oop obj) {

 jobject JNIHandles::make_global(Handle obj) {
+  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
@@ -81,6 +82,7 @@ jobject JNIHandles::make_global(Handle obj) {

 jobject JNIHandles::make_weak_global(Handle obj) {
+  assert(!Universe::heap()->is_gc_active(), "can't extend the root set during GC");
   jobject res = NULL;
   if (!obj.is_null()) {
     // ignore null handles
......
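Reviewer note: the two new asserts guard the paths that JNI global-ref creation takes into the VM, since extending the root set mid-GC would corrupt root scanning. Sketched from the agent's point of view (the native method name Java_Example_pin is hypothetical; NewGlobalRef and NewWeakGlobalRef bottom out in make_global and make_weak_global):

#include <jni.h>

extern "C" JNIEXPORT void JNICALL
Java_Example_pin(JNIEnv* env, jclass, jobject obj) {
  jobject strong = env->NewGlobalRef(obj);      // strong root: keeps obj alive
  jweak   weak   = env->NewWeakGlobalRef(obj);  // weak: cleared when obj dies
  env->DeleteWeakGlobalRef(weak);
  env->DeleteGlobalRef(strong);
}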
@@ -26,7 +26,7 @@
 # include "incls/_sweeper.cpp.incl"

 long      NMethodSweeper::_traversals = 0;   // No. of stack traversals performed
-CodeBlob* NMethodSweeper::_current = NULL;   // Current nmethod
+nmethod*  NMethodSweeper::_current = NULL;   // Current nmethod
 int       NMethodSweeper::_seen = 0;         // No. of blobs we have currently processed in current pass of CodeCache
 int       NMethodSweeper::_invocations = 0;  // No. of invocations left until we are completed with this pass
@@ -171,20 +171,16 @@ void NMethodSweeper::sweep_code_cache() {
       // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
       // Other blobs can be deleted by other threads
       // Read next before we potentially delete current
-      CodeBlob* next = CodeCache::next_nmethod(_current);
+      nmethod* next = CodeCache::next_nmethod(_current);

       // Now ready to process nmethod and give up CodeCache_lock
       {
         MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-        process_nmethod((nmethod *)_current);
+        process_nmethod(_current);
       }
       _seen++;
       _current = next;
     }
-
-    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
-    // can be freed async to us and make _current invalid while we sleep.
-    _current = CodeCache::next_nmethod(_current);
   }

   if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
......
@@ -29,7 +29,7 @@
 class NMethodSweeper : public AllStatic {
   static long      _traversals;   // Stack traversal count
-  static CodeBlob* _current;      // Current nmethod
+  static nmethod*  _current;      // Current nmethod
   static int       _seen;         // Nof. nmethod we have currently processed in current pass of CodeCache
   static int       _invocations;  // No. of invocations left until we are completed with this pass
......
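Reviewer note: once _current is typed as nmethod*, the removed skip-ahead is unnecessary, because the successor is always captured while CodeCache_lock is still held. The underlying iteration pattern, as a generic sketch in portable C++ (ours, not HotSpot code):

#include <iterator>
#include <list>

// "Read next before we potentially delete current": process() may erase the
// element it is handed, and std::list::erase invalidates only the erased
// iterator, so the captured 'next' stays valid.
template <typename T, typename F>
void sweep(std::list<T>& cache, F process) {
  for (auto it = cache.begin(); it != cache.end(); ) {
    auto next = std::next(it);  // capture successor first
    process(cache, it);         // may call cache.erase(it)
    it = next;
  }
}

Usage would look like sweep(blobs, [](std::list<int>& c, std::list<int>::iterator i) { if (*i < 0) c.erase(i); });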
@@ -111,6 +111,35 @@ char* ReservedSpace::reserve_and_align(const size_t reserve_size,
   return result;
 }

+// Helper method.
+static bool failed_to_reserve_as_requested(char* base, char* requested_address,
+                                           const size_t size, bool special)
+{
+  if (base == requested_address || requested_address == NULL)
+    return false; // did not fail
+
+  if (base != NULL) {
+    // Different reserve address may be acceptable in other cases
+    // but for compressed oops heap should be at requested address.
+    assert(UseCompressedOops, "currently requested address used only for compressed oops");
+    if (PrintCompressedOopsMode) {
+      tty->cr();
+      tty->print_cr("Reserved memory at not requested address: " PTR_FORMAT " vs " PTR_FORMAT, base, requested_address);
+    }
+    // OS ignored requested address. Try different address.
+    if (special) {
+      if (!os::release_memory_special(base, size)) {
+        fatal("os::release_memory_special failed");
+      }
+    } else {
+      if (!os::release_memory(base, size)) {
+        fatal("os::release_memory failed");
+      }
+    }
+  }
+  return true;
+}
+
 ReservedSpace::ReservedSpace(const size_t prefix_size,
                              const size_t prefix_align,
                              const size_t suffix_size,
@@ -129,6 +158,10 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
   assert((suffix_align & prefix_align - 1) == 0,
     "suffix_align not divisible by prefix_align");

+  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
+  assert(noaccess_prefix == 0 ||
+         noaccess_prefix == prefix_align, "noaccess prefix wrong");
+
   // Add in noaccess_prefix to prefix_size;
   const size_t adjusted_prefix_size = prefix_size + noaccess_prefix;
   const size_t size = adjusted_prefix_size + suffix_size;
@@ -150,15 +183,16 @@ ReservedSpace::ReservedSpace(const size_t prefix_size,
   _noaccess_prefix = 0;
   _executable = false;

-  // Assert that if noaccess_prefix is used, it is the same as prefix_align.
-  assert(noaccess_prefix == 0 ||
-         noaccess_prefix == prefix_align, "noaccess prefix wrong");
-
   // Optimistically try to reserve the exact size needed.
   char* addr;
   if (requested_address != 0) {
-    addr = os::attempt_reserve_memory_at(size,
-                                         requested_address-noaccess_prefix);
+    requested_address -= noaccess_prefix; // adjust address
+    assert(requested_address != NULL, "huge noaccess prefix?");
+    addr = os::attempt_reserve_memory_at(size, requested_address);
+    if (failed_to_reserve_as_requested(addr, requested_address, size, false)) {
+      // OS ignored requested address. Try different address.
+      addr = NULL;
+    }
   } else {
     addr = os::reserve_memory(size, NULL, prefix_align);
   }
@@ -222,11 +256,20 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
   bool special = large && !os::can_commit_large_page_memory();
   char* base = NULL;

+  if (requested_address != 0) {
+    requested_address -= noaccess_prefix; // adjust requested address
+    assert(requested_address != NULL, "huge noaccess prefix?");
+  }
+
   if (special) {

     base = os::reserve_memory_special(size, requested_address, executable);

     if (base != NULL) {
+      if (failed_to_reserve_as_requested(base, requested_address, size, true)) {
+        // OS ignored requested address. Try different address.
+        return;
+      }
       // Check alignment constraints
       if (alignment > 0) {
         assert((uintptr_t) base % alignment == 0,
@@ -235,6 +278,13 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
       _special = true;
     } else {
       // failed; try to reserve regular memory below
+      if (UseLargePages && (!FLAG_IS_DEFAULT(UseLargePages) ||
+                            !FLAG_IS_DEFAULT(LargePageSizeInBytes))) {
+        if (PrintCompressedOopsMode) {
+          tty->cr();
+          tty->print_cr("Reserve regular memory without large pages.");
+        }
+      }
     }
   }
@@ -248,8 +298,11 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
     // important.  If available space is not detected, return NULL.

     if (requested_address != 0) {
-      base = os::attempt_reserve_memory_at(size,
-                                           requested_address-noaccess_prefix);
+      base = os::attempt_reserve_memory_at(size, requested_address);
+      if (failed_to_reserve_as_requested(base, requested_address, size, false)) {
+        // OS ignored requested address. Try different address.
+        base = NULL;
+      }
     } else {
       base = os::reserve_memory(size, NULL, alignment);
     }
@@ -365,7 +418,12 @@ void ReservedSpace::release() {
 }

 void ReservedSpace::protect_noaccess_prefix(const size_t size) {
-  // If there is noaccess prefix, return.
+  assert( (_noaccess_prefix != 0) == (UseCompressedOops && _base != NULL &&
+                                      (size_t(_base + _size) > OopEncodingHeapMax) &&
+                                      Universe::narrow_oop_use_implicit_null_checks()),
+          "noaccess_prefix should be used only with non zero based compressed oops");
+
+  // If there is no noaccess prefix, return.
   if (_noaccess_prefix == 0) return;

   assert(_noaccess_prefix >= (size_t)os::vm_page_size(),
@@ -377,6 +435,10 @@ void ReservedSpace::protect_noaccess_prefix(const size_t size) {
                           _special)) {
     fatal("cannot protect protection page");
   }
+  if (PrintCompressedOopsMode) {
+    tty->cr();
+    tty->print_cr("Protected page at the reserved heap base: " PTR_FORMAT " / " INTX_FORMAT " bytes", _base, _noaccess_prefix);
+  }

   _base += _noaccess_prefix;
   _size -= _noaccess_prefix;
......
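Reviewer note: the noaccess prefix reserves an unreadable page below the heap base so that decoding a compressed null (base + 0) faults, which is what lets the VM keep implicit null checks for non-zero-based compressed oops. A standalone POSIX sketch of the idea, not the HotSpot code (sizes are illustrative assumptions; MAP_ANONYMOUS is the Linux/BSD spelling):

#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t page = (size_t)sysconf(_SC_PAGESIZE);
  size_t heap_size = 16 * 1024 * 1024;  // illustration only

  // Reserve prefix + heap in one mapping, everything inaccessible at first.
  char* base = (char*)mmap(NULL, page + heap_size, PROT_NONE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(base != MAP_FAILED);

  // Leave the first page PROT_NONE (the noaccess prefix) and make the rest
  // usable; the usable heap then starts one page above the reservation, so
  // any access through a decoded null lands in the protected page.
  int rc = mprotect(base + page, heap_size, PROT_READ | PROT_WRITE);
  assert(rc == 0);

  char* heap_base = base + page;
  (void)heap_base;
  return 0;
}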