Commit 5c068227 authored by coleenp

Merge

/*
* Copyright (c) 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -29,13 +29,13 @@ import java.io.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.types.*;
public class LowMemoryDetectorThread extends JavaThread {
public LowMemoryDetectorThread(Address addr) {
public class ServiceThread extends JavaThread {
public ServiceThread(Address addr) {
super(addr);
}
public boolean isJavaThread() { return false; }
public boolean isHiddenFromExternalView() { return true; }
public boolean isLowMemoryDetectorThread() { return true; }
public boolean isServiceThread() { return true; }
}
/*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -111,7 +111,7 @@ public class Thread extends VMObject {
public boolean isJvmtiAgentThread() { return false; }
public boolean isWatcherThread() { return false; }
public boolean isConcurrentMarkSweepThread() { return false; }
public boolean isLowMemoryDetectorThread() { return false; }
public boolean isServiceThread() { return false; }
/** Memory operations */
public void oopsDo(AddressVisitor oopVisitor) {
......
/*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -107,14 +107,14 @@ public class Threads {
// for now, use JavaThread itself. fix it later with appropriate class if needed
virtualConstructor.addMapping("SurrogateLockerThread", JavaThread.class);
virtualConstructor.addMapping("JvmtiAgentThread", JvmtiAgentThread.class);
virtualConstructor.addMapping("LowMemoryDetectorThread", LowMemoryDetectorThread.class);
virtualConstructor.addMapping("ServiceThread", ServiceThread.class);
}
public Threads() {
}
/** NOTE: this returns objects of type JavaThread, CompilerThread,
JvmtiAgentThread, and LowMemoryDetectorThread.
JvmtiAgentThread, and ServiceThread.
The latter three are subclasses of the former. Most operations
(fetching the top frame, etc.) are only allowed to be performed on
a "pure" JavaThread. For this reason, {@link
......@@ -143,7 +143,7 @@ public class Threads {
return thread;
} catch (Exception e) {
throw new RuntimeException("Unable to deduce type of thread from address " + threadAddr +
" (expected type JavaThread, CompilerThread, LowMemoryDetectorThread, JvmtiAgentThread, or SurrogateLockerThread)", e);
" (expected type JavaThread, CompilerThread, ServiceThread, JvmtiAgentThread, or SurrogateLockerThread)", e);
}
}
......
......@@ -34,6 +34,7 @@
#include "interpreter/bytecode.hpp"
#include "oops/methodDataOop.hpp"
#include "prims/jvmtiRedefineClassesTrace.hpp"
#include "prims/jvmtiImpl.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/sweeper.hpp"
#include "utilities/dtrace.hpp"
......@@ -1533,7 +1534,10 @@ void nmethod::post_compiled_method_load_event() {
}
if (JvmtiExport::should_post_compiled_method_load()) {
JvmtiExport::post_compiled_method_load(this);
// Let the Service thread (which is a real Java thread) post the event
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
JvmtiDeferredEventQueue::enqueue(
JvmtiDeferredEvent::compiled_method_load_event(this));
}
}
......@@ -1566,8 +1570,17 @@ void nmethod::post_compiled_method_unload() {
// ref will have been cleared.
if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) {
assert(!unload_reported(), "already unloaded");
HandleMark hm;
JvmtiExport::post_compiled_method_unload(_jmethod_id, insts_begin());
JvmtiDeferredEvent event =
JvmtiDeferredEvent::compiled_method_unload_event(
_jmethod_id, insts_begin());
if (SafepointSynchronize::is_at_safepoint()) {
// Don't want to take the queueing lock. Add it as pending and
// it will get enqueued later.
JvmtiDeferredEventQueue::add_pending_event(event);
} else {
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
JvmtiDeferredEventQueue::enqueue(event);
}
}
// The JVMTI CompiledMethodUnload event can be enabled or disabled at
......
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -696,10 +696,11 @@ public:
class nmethodLocker : public StackObj {
nmethod* _nm;
public:
static void lock_nmethod(nmethod* nm); // note: nm can be NULL
static void unlock_nmethod(nmethod* nm); // (ditto)
public:
nmethodLocker(address pc); // derive nm from pc
nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
nmethodLocker() { _nm = NULL; }
......
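The only functional change to nmethodLocker above is visibility: lock_nmethod()/unlock_nmethod() become callable from outside the class, so a deferred compiled-method-load event can pin its nmethod when the event is created and release it only after the Service thread has posted it, i.e. the pin must outlive any single stack frame. A minimal sketch of that idea in standard C++, with illustrative names (Blob, BlobLocker) rather than the HotSpot types:

#include <atomic>
#include <cassert>

// Illustrative stand-in for a code blob whose lifetime must be pinned.
struct Blob {
  std::atomic<int> lock_count{0};
};

class BlobLocker {                       // RAII form, like "nmethodLocker nml(nm);"
  Blob* _b;
 public:
  // Non-scoped form, like lock_nmethod()/unlock_nmethod(): used when the pin
  // must outlive the current stack frame (e.g. while an event sits in a queue).
  static void lock(Blob* b)   { if (b != nullptr) b->lock_count.fetch_add(1); }
  static void unlock(Blob* b) {
    if (b != nullptr) {
      int old = b->lock_count.fetch_sub(1);
      (void)old;
      assert(old > 0 && "unbalanced unlock");
    }
  }

  explicit BlobLocker(Blob* b) : _b(b) { lock(_b); }
  ~BlobLocker()                        { unlock(_b); }
};

In the patch itself, compiled_method_load_event() takes the non-scoped lock when the event is built, and post() releases it once the event has been delivered.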
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -750,15 +750,12 @@ public:
// pending CompiledMethodUnload support
//
bool JvmtiExport::_have_pending_compiled_method_unload_events;
GrowableArray<jmethodID>* JvmtiExport::_pending_compiled_method_unload_method_ids;
GrowableArray<const void *>* JvmtiExport::_pending_compiled_method_unload_code_begins;
JavaThread* JvmtiExport::_current_poster;
void JvmtiExport::post_compiled_method_unload_internal(JavaThread* self, jmethodID method, const void *code_begin) {
void JvmtiExport::post_compiled_method_unload(
jmethodID method, const void *code_begin) {
JavaThread* thread = JavaThread::current();
EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
("JVMTI [%s] method compile unload event triggered",
JvmtiTrace::safe_get_thread_name(self)));
JvmtiTrace::safe_get_thread_name(thread)));
// post the event for each environment that has this event enabled.
JvmtiEnvIterator it;
......@@ -767,12 +764,12 @@ void JvmtiExport::post_compiled_method_unload_internal(JavaThread* self, jmethod
EVT_TRACE(JVMTI_EVENT_COMPILED_METHOD_UNLOAD,
("JVMTI [%s] class compile method unload event sent jmethodID " PTR_FORMAT,
JvmtiTrace::safe_get_thread_name(self), method));
JvmtiTrace::safe_get_thread_name(thread), method));
ResourceMark rm(self);
ResourceMark rm(thread);
JvmtiEventMark jem(self);
JvmtiJavaThreadEventTransition jet(self);
JvmtiEventMark jem(thread);
JvmtiJavaThreadEventTransition jet(thread);
jvmtiEventCompiledMethodUnload callback = env->callbacks()->CompiledMethodUnload;
if (callback != NULL) {
(*callback)(env->jvmti_external(), method, code_begin);
......@@ -781,90 +778,6 @@ void JvmtiExport::post_compiled_method_unload_internal(JavaThread* self, jmethod
}
}
// post any pending CompiledMethodUnload events
void JvmtiExport::post_pending_compiled_method_unload_events() {
JavaThread* self = JavaThread::current();
assert(!self->owns_locks(), "can't hold locks");
// Indicates if this is the first activation of this function.
// In theory the profiler's callback could call back into VM and provoke
// another CompiledMethodLoad event to be posted from this thread. As the
// stack rewinds we need to ensure that the original activation does the
// completion and notifies any waiters.
bool first_activation = false;
// the jmethodID (may not be valid) to be used for a single event
jmethodID method;
const void *code_begin;
// grab the monitor and check if another thread is already posting
// events. If there is another thread posting events then we wait
// until it completes. (In theory we could check the pending events to
// see if any of the addresses overlap with the event that we want to
// post but as it will happen so rarely we just block any thread waiting
// to post a CompiledMethodLoad or DynamicCodeGenerated event until all
// pending CompiledMethodUnload events have been posted).
//
// If another thread isn't posting we examine the list of pending jmethodIDs.
// If the list is empty then we are done. If it's not empty then this thread
// (self) becomes the pending event poster and we remove the top (last)
// event from the list. Note that this means we remove the newest event first
// but as they are all CompiledMethodUnload events the order doesn't matter.
// Once we have removed a jmethodID then we exit the monitor. Any other thread
// wanting to post a CompiledMethodLoad or DynamicCodeGenerated event will
// be forced to wait on the monitor.
{
MutexLocker mu(JvmtiPendingEvent_lock);
if (_current_poster != self) {
while (_current_poster != NULL) {
JvmtiPendingEvent_lock->wait();
}
}
if ((_pending_compiled_method_unload_method_ids == NULL) ||
(_pending_compiled_method_unload_method_ids->length() == 0)) {
return;
}
if (_current_poster == NULL) {
_current_poster = self;
first_activation = true;
} else {
// re-entrant
guarantee(_current_poster == self, "checking");
}
method = _pending_compiled_method_unload_method_ids->pop();
code_begin = _pending_compiled_method_unload_code_begins->pop();
}
// This thread is the pending event poster so it first posts the CompiledMethodUnload
// event for the jmethodID that has been removed from the list. Once posted it
// re-grabs the monitor and checks the list again. If the list is empty then and this
// is the first activation of the function then we reset the _have_pending_events
// flag, cleanup _current_poster to indicate that no thread is now servicing the
// pending events list, and finally notify any thread that might be waiting.
for (;;) {
post_compiled_method_unload_internal(self, method, code_begin);
// event posted, now re-grab monitor and get the next event
// If there's no next event then we are done. If this is the first
// activation of this function by this thread, notify any waiters
// so that they can post.
{
MutexLocker ml(JvmtiPendingEvent_lock);
if (_pending_compiled_method_unload_method_ids->length() == 0) {
if (first_activation) {
_have_pending_compiled_method_unload_events = false;
_current_poster = NULL;
JvmtiPendingEvent_lock->notify_all();
}
return;
}
method = _pending_compiled_method_unload_method_ids->pop();
code_begin = _pending_compiled_method_unload_code_begins->pop();
}
}
}
///////////////////////////////////////////////////////////////
//
// JvmtiExport
......@@ -1830,16 +1743,7 @@ jvmtiCompiledMethodLoadInlineRecord* create_inline_record(nmethod* nm) {
}
void JvmtiExport::post_compiled_method_load(nmethod *nm) {
// If there are pending CompiledMethodUnload events then these are
// posted before this CompiledMethodLoad event. We "lock" the nmethod and
// maintain a handle to the methodOop to ensure that the nmethod isn't
// flushed or unloaded while posting the events.
JavaThread* thread = JavaThread::current();
if (have_pending_compiled_method_unload_events()) {
methodHandle mh(thread, nm->method());
nmethodLocker nml(nm);
post_pending_compiled_method_unload_events();
}
EVT_TRIG_TRACE(JVMTI_EVENT_COMPILED_METHOD_LOAD,
("JVMTI [%s] method compile load event triggered",
......@@ -1854,8 +1758,8 @@ void JvmtiExport::post_compiled_method_load(nmethod *nm) {
JvmtiTrace::safe_get_thread_name(thread),
(nm->method() == NULL) ? "NULL" : nm->method()->klass_name()->as_C_string(),
(nm->method() == NULL) ? "NULL" : nm->method()->name()->as_C_string()));
ResourceMark rm(thread);
HandleMark hm(thread);
// Add inlining information
jvmtiCompiledMethodLoadInlineRecord* inlinerecord = create_inline_record(nm);
......@@ -1899,28 +1803,6 @@ void JvmtiExport::post_compiled_method_load(JvmtiEnv* env, const jmethodID metho
}
}
// used at a safepoint to post a CompiledMethodUnload event
void JvmtiExport::post_compiled_method_unload(jmethodID mid, const void *code_begin) {
if (SafepointSynchronize::is_at_safepoint()) {
// Class unloading can cause nmethod unloading which is reported
// by the VMThread. These must be batched to be processed later.
if (_pending_compiled_method_unload_method_ids == NULL) {
// create list lazily
_pending_compiled_method_unload_method_ids = new (ResourceObj::C_HEAP) GrowableArray<jmethodID>(10,true);
_pending_compiled_method_unload_code_begins = new (ResourceObj::C_HEAP) GrowableArray<const void *>(10,true);
}
_pending_compiled_method_unload_method_ids->append(mid);
_pending_compiled_method_unload_code_begins->append(code_begin);
_have_pending_compiled_method_unload_events = true;
} else {
// Unloading caused by the sweeper can be reported synchronously.
if (have_pending_compiled_method_unload_events()) {
post_pending_compiled_method_unload_events();
}
post_compiled_method_unload_internal(JavaThread::current(), mid, code_begin);
}
}
void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) {
JavaThread* thread = JavaThread::current();
EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED,
......@@ -1953,9 +1835,9 @@ void JvmtiExport::post_dynamic_code_generated(const char *name, const void *code
return;
}
if (have_pending_compiled_method_unload_events()) {
post_pending_compiled_method_unload_events();
}
// Blocks until everything now in the queue has been posted
JvmtiDeferredEventQueue::flush_queue(Thread::current());
post_dynamic_code_generated_internal(name, code_begin, code_end);
}
......
/*
* Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -141,25 +141,6 @@ class JvmtiExport : public AllStatic {
private:
// CompiledMethodUnload events are reported from the VM thread so they
// are collected in lists (of jmethodID/addresses) and the events are posted later
// from threads posting CompiledMethodLoad or DynamicCodeGenerated events.
static bool _have_pending_compiled_method_unload_events;
static GrowableArray<jmethodID>* _pending_compiled_method_unload_method_ids;
static GrowableArray<const void *>* _pending_compiled_method_unload_code_begins;
static JavaThread* _current_poster;
// tests if there are CompiledMethodUnload events pending
inline static bool have_pending_compiled_method_unload_events() {
return _have_pending_compiled_method_unload_events;
}
// posts any pending CompiledMethodUnload events.
static void post_pending_compiled_method_unload_events();
// Perform the actual notification to interested JvmtiEnvs.
static void post_compiled_method_unload_internal(JavaThread* self, jmethodID mid, const void* code_begin);
// posts a DynamicCodeGenerated event (internal/private implementation).
// The public post_dynamic_code_generated* functions make use of the
// internal implementation.
......@@ -256,7 +237,7 @@ class JvmtiExport : public AllStatic {
// single stepping management methods
static void at_single_stepping_point(JavaThread *thread, methodOop method, address location) KERNEL_RETURN;
static void expose_single_stepping(JavaThread *thread) KERNEL_RETURN;
static bool hide_single_stepping(JavaThread *thread) KERNEL_RETURN_(return false;);
static bool hide_single_stepping(JavaThread *thread) KERNEL_RETURN_(false);
// Methods that notify the debugger that something interesting has happened in the VM.
static void post_vm_start ();
......@@ -271,20 +252,20 @@ class JvmtiExport : public AllStatic {
static oop jni_GetField_probe (JavaThread *thread, jobject jobj,
oop obj, klassOop klass, jfieldID fieldID, bool is_static)
KERNEL_RETURN_(return NULL;);
KERNEL_RETURN_(NULL);
static oop jni_GetField_probe_nh (JavaThread *thread, jobject jobj,
oop obj, klassOop klass, jfieldID fieldID, bool is_static)
KERNEL_RETURN_(return NULL;);
KERNEL_RETURN_(NULL);
static void post_field_access_by_jni (JavaThread *thread, oop obj,
klassOop klass, jfieldID fieldID, bool is_static) KERNEL_RETURN;
static void post_field_access (JavaThread *thread, methodOop method,
address location, KlassHandle field_klass, Handle object, jfieldID field) KERNEL_RETURN;
static oop jni_SetField_probe (JavaThread *thread, jobject jobj,
oop obj, klassOop klass, jfieldID fieldID, bool is_static, char sig_type,
jvalue *value) KERNEL_RETURN_(return NULL;);
jvalue *value) KERNEL_RETURN_(NULL);
static oop jni_SetField_probe_nh (JavaThread *thread, jobject jobj,
oop obj, klassOop klass, jfieldID fieldID, bool is_static, char sig_type,
jvalue *value) KERNEL_RETURN_(return NULL;);
jvalue *value) KERNEL_RETURN_(NULL);
static void post_field_modification_by_jni(JavaThread *thread, oop obj,
klassOop klass, jfieldID fieldID, bool is_static, char sig_type,
jvalue *value);
......
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -32,11 +32,13 @@
#include "prims/jvmtiEventController.inline.hpp"
#include "prims/jvmtiImpl.hpp"
#include "prims/jvmtiRedefineClasses.hpp"
#include "runtime/atomic.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/handles.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/signature.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vframe_hp.hpp"
......@@ -910,3 +912,207 @@ void JvmtiSuspendControl::print() {
tty->print_cr("]");
#endif
}
#ifndef KERNEL
JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event(
nmethod* nm) {
JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD);
event.set_compiled_method_load(nm);
nmethodLocker::lock_nmethod(nm); // will be unlocked when posted
return event;
}
JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event(
jmethodID id, const void* code) {
JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD);
event.set_compiled_method_unload(id, code);
return event;
}
void JvmtiDeferredEvent::post() {
switch(_type) {
case TYPE_COMPILED_METHOD_LOAD:
JvmtiExport::post_compiled_method_load(compiled_method_load());
nmethodLocker::unlock_nmethod(compiled_method_load());
break;
case TYPE_COMPILED_METHOD_UNLOAD:
JvmtiExport::post_compiled_method_unload(
compiled_method_unload_method_id(),
compiled_method_unload_code_begin());
break;
case TYPE_FLUSH:
JvmtiDeferredEventQueue::flush_complete(flush_state_addr());
break;
default:
ShouldNotReachHere();
}
}
JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_tail = NULL;
JvmtiDeferredEventQueue::QueueNode* JvmtiDeferredEventQueue::_queue_head = NULL;
volatile JvmtiDeferredEventQueue::QueueNode*
JvmtiDeferredEventQueue::_pending_list = NULL;
bool JvmtiDeferredEventQueue::has_events() {
assert(Service_lock->owned_by_self(), "Must own Service_lock");
return _queue_head != NULL || _pending_list != NULL;
}
void JvmtiDeferredEventQueue::enqueue(const JvmtiDeferredEvent& event) {
assert(Service_lock->owned_by_self(), "Must own Service_lock");
process_pending_events();
// Events get added to the end of the queue (and are pulled off the front).
QueueNode* node = new QueueNode(event);
if (_queue_tail == NULL) {
_queue_tail = _queue_head = node;
} else {
assert(_queue_tail->next() == NULL, "Must be the last element in the list");
_queue_tail->set_next(node);
_queue_tail = node;
}
Service_lock->notify_all();
assert((_queue_head == NULL) == (_queue_tail == NULL),
"Inconsistent queue markers");
}
JvmtiDeferredEvent JvmtiDeferredEventQueue::dequeue() {
assert(Service_lock->owned_by_self(), "Must own Service_lock");
process_pending_events();
assert(_queue_head != NULL, "Nothing to dequeue");
if (_queue_head == NULL) {
// Just in case this happens in product; it shouldn't but let's not crash
return JvmtiDeferredEvent();
}
QueueNode* node = _queue_head;
_queue_head = _queue_head->next();
if (_queue_head == NULL) {
_queue_tail = NULL;
}
assert((_queue_head == NULL) == (_queue_tail == NULL),
"Inconsistent queue markers");
JvmtiDeferredEvent event = node->event();
delete node;
return event;
}
void JvmtiDeferredEventQueue::add_pending_event(
const JvmtiDeferredEvent& event) {
QueueNode* node = new QueueNode(event);
bool success = false;
QueueNode* prev_value = (QueueNode*)_pending_list;
do {
node->set_next(prev_value);
prev_value = (QueueNode*)Atomic::cmpxchg_ptr(
(void*)node, (volatile void*)&_pending_list, (void*)node->next());
} while (prev_value != node->next());
}
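add_pending_event() is the safepoint-safe producer path: it pushes onto _pending_list with a compare-and-swap loop so that a thread at a safepoint never has to take Service_lock. The same push idiom, sketched with std::atomic instead of HotSpot's Atomic::cmpxchg_ptr (illustrative only):

#include <atomic>

struct Node {
  int   payload;
  Node* next;
};

std::atomic<Node*> pending_head{nullptr};

// Lock-free push: keep retrying until the head we read is still the head
// at the moment we try to swing it to the new node.
void add_pending(Node* node) {
  Node* old_head = pending_head.load(std::memory_order_relaxed);
  do {
    node->next = old_head;
  } while (!pending_head.compare_exchange_weak(old_head, node,
                                               std::memory_order_release,
                                               std::memory_order_relaxed));
}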
// This method transfers any events that were added by someone NOT holding
// the lock into the mainline queue.
void JvmtiDeferredEventQueue::process_pending_events() {
assert(Service_lock->owned_by_self(), "Must own Service_lock");
if (_pending_list != NULL) {
QueueNode* head =
(QueueNode*)Atomic::xchg_ptr(NULL, (volatile void*)&_pending_list);
assert((_queue_head == NULL) == (_queue_tail == NULL),
"Inconsistent queue markers");
if (head != NULL) {
// Since we've treated the pending list as a stack (with newer
// events at the beginning), we need to join the bottom of the stack
// with the 'tail' of the queue in order to get the events in the
// right order. We do this by reversing the pending list and appending
// it to the queue.
QueueNode* new_tail = head;
QueueNode* new_head = NULL;
// This reverses the list
QueueNode* prev = new_tail;
QueueNode* node = new_tail->next();
new_tail->set_next(NULL);
while (node != NULL) {
QueueNode* next = node->next();
node->set_next(prev);
prev = node;
node = next;
}
new_head = prev;
// Now append the new list to the queue
if (_queue_tail != NULL) {
_queue_tail->set_next(new_head);
} else { // _queue_head == NULL
_queue_head = new_head;
}
_queue_tail = new_tail;
}
}
}
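Because the lock-free push builds a LIFO stack (newest event first), process_pending_events() must reverse whatever it drains before splicing it onto the FIFO queue tail. A standalone sketch of that drain-and-reverse step, reusing Node and pending_head from the push sketch above:

// Detach the whole pending stack with one atomic exchange, then reverse it so
// the oldest pending node ends up first, matching queue (FIFO) order.
Node* drain_pending_in_fifo_order() {
  Node* head     = pending_head.exchange(nullptr, std::memory_order_acquire);
  Node* reversed = nullptr;
  while (head != nullptr) {
    Node* next = head->next;
    head->next = reversed;
    reversed   = head;
    head       = next;
  }
  return reversed;   // append this chain at the queue tail under the queue lock
}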
enum {
// Random - used for debugging
FLUSHING = 0x50403020,
FLUSHED = 0x09080706
};
void JvmtiDeferredEventQueue::flush_queue(Thread* thread) {
volatile int flush_state = FLUSHING;
JvmtiDeferredEvent flush(JvmtiDeferredEvent::TYPE_FLUSH);
flush.set_flush_state_addr((int*)&flush_state);
if (ServiceThread::is_service_thread(thread)) {
// If we are the service thread we have to post all preceding events
// Use the flush event as a token to indicate when we can stop
JvmtiDeferredEvent event;
{
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
enqueue(flush);
event = dequeue();
}
while (!event.is_flush_event() ||
event.flush_state_addr() != &flush_state) {
event.post();
{
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
event = dequeue();
}
}
} else {
// Wake up the service thread so it will process events. When it gets
// to the flush event it will set 'flush_complete' and notify us.
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
enqueue(flush);
while (flush_state != FLUSHED) {
assert(flush_state == FLUSHING || flush_state == FLUSHED,
"only valid values for this");
Service_lock->wait(Mutex::_no_safepoint_check_flag);
}
}
}
void JvmtiDeferredEventQueue::flush_complete(int* state_addr) {
assert(state_addr != NULL && *state_addr == FLUSHING, "must be");
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
*state_addr = FLUSHED;
Service_lock->notify_all();
}
#endif // ndef KERNEL
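flush_queue() gives callers an "everything enqueued before this call has been posted" barrier: it enqueues a pseudo-event carrying a pointer to a per-call flag, and flush_complete() flips that flag and notifies the waiter when the Service thread reaches the pseudo-event. A compact sketch of the same token idea using std::mutex/std::condition_variable rather than HotSpot's Monitor (names are illustrative):

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>

std::mutex                        q_lock;
std::condition_variable           q_cv;
std::deque<std::function<void()>> q;     // stand-in for the deferred-event queue

// Stand-in for the Service thread loop, e.g. started once with
// std::thread(worker).detach().
void worker() {
  for (;;) {
    std::function<void()> job;
    {
      std::unique_lock<std::mutex> l(q_lock);
      q_cv.wait(l, [] { return !q.empty(); });
      job = std::move(q.front());
      q.pop_front();
    }
    job();   // "post" the event outside the lock
  }
}

// Blocks until every job enqueued before this call has run, by enqueuing a
// sentinel job that flips a per-call flag (the analogue of the TYPE_FLUSH event).
void flush_queue() {
  std::mutex              flush_lock;
  std::condition_variable flush_cv;
  bool flushed = false;

  {
    std::lock_guard<std::mutex> l(q_lock);
    q.push_back([&] {                        // the "flush" pseudo-event
      std::lock_guard<std::mutex> fl(flush_lock);
      flushed = true;
      flush_cv.notify_all();                 // the analogue of flush_complete()
    });
    q_cv.notify_all();
  }
  std::unique_lock<std::mutex> fl(flush_lock);
  flush_cv.wait(fl, [&] { return flushed; });
}

The real flush_queue() additionally handles the case where the caller is the Service thread itself, in which case it drains and posts the queued events in place instead of waiting.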
/*
* Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -433,6 +433,149 @@ public:
#endif // !JVMTI_KERNEL
/**
* When a thread (such as the compiler thread or VM thread) cannot post a
* JVMTI event itself because the event needs to be posted from a Java
* thread, then it can defer the event to the Service thread for posting.
* The information needed to post the event is encapsulated into this class
* and then enqueued onto the JvmtiDeferredEventQueue, where the Service
* thread will pick it up and post it.
*
* This is currently only used for posting compiled-method-load and unload
* events, which we don't want posted from the compiler thread.
*/
class JvmtiDeferredEvent VALUE_OBJ_CLASS_SPEC {
friend class JvmtiDeferredEventQueue;
private:
typedef enum {
TYPE_NONE,
TYPE_COMPILED_METHOD_LOAD,
TYPE_COMPILED_METHOD_UNLOAD,
TYPE_FLUSH // pseudo-event used to implement flush_queue()
} Type;
Type _type;
union {
nmethod* compiled_method_load;
struct {
jmethodID method_id;
const void* code_begin;
} compiled_method_unload;
int* flush_state_addr;
} _event_data;
JvmtiDeferredEvent(Type t) : _type(t) {}
void set_compiled_method_load(nmethod* nm) {
assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be");
_event_data.compiled_method_load = nm;
}
nmethod* compiled_method_load() const {
assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be");
return _event_data.compiled_method_load;
}
void set_compiled_method_unload(jmethodID id, const void* code) {
assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be");
_event_data.compiled_method_unload.method_id = id;
_event_data.compiled_method_unload.code_begin = code;
}
jmethodID compiled_method_unload_method_id() const {
assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be");
return _event_data.compiled_method_unload.method_id;
}
const void* compiled_method_unload_code_begin() const {
assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be");
return _event_data.compiled_method_unload.code_begin;
}
bool is_flush_event() const { return _type == TYPE_FLUSH; }
int* flush_state_addr() const {
assert(is_flush_event(), "must be");
return _event_data.flush_state_addr;
}
void set_flush_state_addr(int* flag) {
assert(is_flush_event(), "must be");
_event_data.flush_state_addr = flag;
}
public:
JvmtiDeferredEvent() : _type(TYPE_NONE) {}
// Factory methods
static JvmtiDeferredEvent compiled_method_load_event(nmethod* nm)
KERNEL_RETURN_(JvmtiDeferredEvent());
static JvmtiDeferredEvent compiled_method_unload_event(
jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent());
// Actually posts the event.
void post() KERNEL_RETURN;
};
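JvmtiDeferredEvent is deliberately a plain value object: a type tag plus a union payload, built through factory methods and read through accessors that assert the tag, so queue nodes can hold events by value and nothing beyond the node itself is heap-allocated per event. A minimal standard-C++ rendering of the same shape (names are illustrative, not the HotSpot API):

#include <cassert>

class Deferred {
  enum Type { NONE, LOAD, UNLOAD };

  Type _type;
  union {
    void* blob;                                   // LOAD payload
    struct { long id; const void* code; } unload; // UNLOAD payload
  } _u;

  explicit Deferred(Type t) : _type(t) { _u.blob = nullptr; }

 public:
  Deferred() : _type(NONE) { _u.blob = nullptr; }

  // Factory methods, one per event kind.
  static Deferred load(void* blob) {
    Deferred d(LOAD); d._u.blob = blob; return d;
  }
  static Deferred unload(long id, const void* code) {
    Deferred d(UNLOAD); d._u.unload.id = id; d._u.unload.code = code; return d;
  }

  // Checked accessors: reading the wrong union member is a programming error.
  void*       blob() const { assert(_type == LOAD && "wrong kind");   return _u.blob; }
  long        id()   const { assert(_type == UNLOAD && "wrong kind"); return _u.unload.id; }
  const void* code() const { assert(_type == UNLOAD && "wrong kind"); return _u.unload.code; }
};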
/**
* Events enqueued on this queue wake up the Service thread which dequeues
* and posts the events. The Service_lock is required to be held
* when operating on the queue (except for the "pending" events).
*/
class JvmtiDeferredEventQueue : AllStatic {
friend class JvmtiDeferredEvent;
private:
class QueueNode : public CHeapObj {
private:
JvmtiDeferredEvent _event;
QueueNode* _next;
public:
QueueNode(const JvmtiDeferredEvent& event)
: _event(event), _next(NULL) {}
const JvmtiDeferredEvent& event() const { return _event; }
QueueNode* next() const { return _next; }
void set_next(QueueNode* next) { _next = next; }
};
static QueueNode* _queue_head; // Hold Service_lock to access
static QueueNode* _queue_tail; // Hold Service_lock to access
static volatile QueueNode* _pending_list; // Uses CAS for read/update
// Transfers events from the _pending_list to the _queue.
static void process_pending_events() KERNEL_RETURN;
static void flush_complete(int* flush_state) KERNEL_RETURN;
public:
// Must be holding Service_lock when calling these
static bool has_events() KERNEL_RETURN_(false);
static void enqueue(const JvmtiDeferredEvent& event) KERNEL_RETURN;
static JvmtiDeferredEvent dequeue() KERNEL_RETURN_(JvmtiDeferredEvent());
// This call blocks until all events enqueued prior to this call
// have been posted. The Service_lock is acquired and waited upon.
//
// Implemented by creating a "flush" event and placing it in the queue.
// When the flush event is "posted" it will call flush_complete(), which
// will release the caller.
//
// Can be called by any thread (maybe even the service thread itself).
// Not necessary for the caller to be a JavaThread.
static void flush_queue(Thread* current) KERNEL_RETURN;
// Used to enqueue events without using a lock, for times (such as during
// safepoint) when we can't or don't want to lock the Service_lock.
//
// Events will be held off to the side until there's a call to
// dequeue(), enqueue(), or process_pending_events() (all of which require
// the holding of the Service_lock), and will be enqueued at that time.
static void add_pending_event(const JvmtiDeferredEvent&) KERNEL_RETURN;
};
// Utility macro that checks for NULL pointers:
#define NULL_CHECK(X, Y) if ((X) == NULL) { return (Y); }
......
......@@ -231,13 +231,13 @@ public:
static void engage(JavaThread* mainThread, bool fullProfile) KERNEL_RETURN ;
static void disengage() KERNEL_RETURN ;
static void print(int unused) KERNEL_RETURN ;
static bool is_active() KERNEL_RETURN_(return false;) ;
static bool is_active() KERNEL_RETURN_(false) ;
// This is NULL if each thread has its own thread profiler,
// else this is the single thread profiler used by all threads.
// In particular it makes a difference during garbage collection,
// where you only want to traverse each thread profiler once.
static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(return NULL;);
static ThreadProfiler* get_thread_profiler() KERNEL_RETURN_(NULL);
// Garbage Collection Support
static void oops_do(OopClosure* f) KERNEL_RETURN ;
......@@ -246,13 +246,13 @@ public:
// Returns the start address for a given pc
// NULL is returned if the PCRecorder is inactive
static address bucket_start_for(address pc) KERNEL_RETURN_(return NULL;);
static address bucket_start_for(address pc) KERNEL_RETURN_(NULL);
enum { MillisecsPerTick = 10 }; // ms per profiling ticks
// Returns the number of ticks recorded for the bucket
// pc belongs to.
static int bucket_count_for(address pc) KERNEL_RETURN_(return 0;);
static int bucket_count_for(address pc) KERNEL_RETURN_(0);
#ifndef FPROF_KERNEL
......
......@@ -129,7 +129,7 @@ Mutex* HotCardCache_lock = NULL;
Monitor* GCTaskManager_lock = NULL;
Mutex* Management_lock = NULL;
Monitor* LowMemory_lock = NULL;
Monitor* Service_lock = NULL;
#define MAX_NUM_MUTEX 128
static Monitor * _mutex_array[MAX_NUM_MUTEX];
......@@ -203,7 +203,7 @@ void mutex_init() {
def(Patching_lock , Mutex , special, true ); // used for safepointing and code patching.
def(ObjAllocPost_lock , Monitor, special, false);
def(LowMemory_lock , Monitor, special, true ); // used for low memory detection
def(Service_lock , Monitor, special, true ); // used for service thread operations
def(JmethodIdCreation_lock , Mutex , leaf, true ); // used for creating jmethodIDs.
def(SystemDictionary_lock , Monitor, leaf, true ); // lookups done by VM thread
......
......@@ -131,7 +131,7 @@ extern Mutex* MMUTracker_lock; // protects the MMU
extern Mutex* HotCardCache_lock; // protects the hot card cache
extern Mutex* Management_lock; // a lock used to serialize JVM management
extern Monitor* LowMemory_lock; // a lock used for low memory detection
extern Monitor* Service_lock; // a lock used for service thread operation
// A MutexLocker provides mutual exclusion with respect to a given mutex
// for the scope which contains the locker. The lock is an OS lock, not
......
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/mutexLocker.hpp"
#include "prims/jvmtiImpl.hpp"
ServiceThread* ServiceThread::_instance = NULL;
void ServiceThread::initialize() {
EXCEPTION_MARK;
instanceKlassHandle klass (THREAD, SystemDictionary::Thread_klass());
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
const char* name = JDK_Version::is_gte_jdk17x_version() ?
"Service Thread" : "Low Memory Detector";
Handle string = java_lang_String::create_from_str(name, CHECK);
// Initialize thread_oop to put it into the system threadGroup
Handle thread_group (THREAD, Universe::system_thread_group());
JavaValue result(T_VOID);
JavaCalls::call_special(&result, thread_oop,
klass,
vmSymbols::object_initializer_name(),
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
CHECK);
{
MutexLocker mu(Threads_lock);
ServiceThread* thread = new ServiceThread(&service_thread_entry);
// At this point it may be possible that no osthread was created for the
// JavaThread due to lack of memory. We would have to throw an exception
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails.
if (thread == NULL || thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
"unable to create new native thread");
}
java_lang_Thread::set_thread(thread_oop(), thread);
java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
java_lang_Thread::set_daemon(thread_oop());
thread->set_threadObj(thread_oop());
Threads::add(thread);
Thread::start(thread);
_instance = thread;
}
}
void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
while (true) {
bool sensors_changed = false;
bool has_jvmti_events = false;
JvmtiDeferredEvent jvmti_event;
{
// Need state transition ThreadBlockInVM so that this thread
// will be handled by safepoint correctly when this thread is
// notified at a safepoint.
// This ThreadBlockInVM object is not also considered to be
// suspend-equivalent because ServiceThread is not visible to
// external suspension.
ThreadBlockInVM tbivm(jt);
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
while (!(sensors_changed = LowMemoryDetector::has_pending_requests()) &&
!(has_jvmti_events = JvmtiDeferredEventQueue::has_events())) {
// wait until one of the sensors has pending requests, or there is a
// pending JVMTI event to post
Service_lock->wait(Mutex::_no_safepoint_check_flag);
}
if (has_jvmti_events) {
jvmti_event = JvmtiDeferredEventQueue::dequeue();
}
}
if (has_jvmti_events) {
jvmti_event.post();
}
if (sensors_changed) {
LowMemoryDetector::process_sensor_changes(jt);
}
}
}
bool ServiceThread::is_service_thread(Thread* thread) {
return thread == _instance;
}
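service_thread_entry() follows the usual shape of a worker that serves more than one kind of request: wait on a single monitor until any producer has work, record which kind fired while still holding the lock, then release the lock before doing the actual work (posting events, calling out to Java). A minimal sketch of that shape with std::condition_variable (illustrative, not the HotSpot Monitor API):

#include <condition_variable>
#include <mutex>

std::mutex              service_lock;       // analogue of Service_lock
std::condition_variable service_cv;
bool sensors_pending = false;               // set by low-memory-sensor producers
bool events_pending  = false;               // set by deferred-JVMTI-event producers

void service_loop() {
  for (;;) {
    bool do_sensors = false, do_events = false;
    {
      std::unique_lock<std::mutex> l(service_lock);
      service_cv.wait(l, [] { return sensors_pending || events_pending; });
      // Record what woke us (and, in this sketch, reset the flags) while
      // still holding the lock.
      do_sensors = sensors_pending;  sensors_pending = false;
      do_events  = events_pending;   events_pending  = false;
    }
    // Do the actual work outside the lock, since it may block, call out to
    // Java, or take other locks.
    if (do_events)  { /* dequeue and post one deferred event */ }
    if (do_sensors) { /* process low-memory sensor changes   */ }
  }
}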
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_RUNTIME_SERVICETHREAD_HPP
#define SHARE_VM_RUNTIME_SERVICETHREAD_HPP
#include "runtime/thread.hpp"
// A JavaThread for low memory detection support and JVMTI
// compiled-method-load events.
class ServiceThread : public JavaThread {
friend class VMStructs;
private:
static ServiceThread* _instance;
static void service_thread_entry(JavaThread* thread, TRAPS);
ServiceThread(ThreadFunction entry_point) : JavaThread(entry_point) {};
public:
static void initialize();
// Hide this thread from external view.
bool is_hidden_from_external_view() const { return true; }
// Returns true if the passed thread is the service thread.
static bool is_service_thread(Thread* thread);
};
#endif // SHARE_VM_RUNTIME_SERVICETHREAD_HPP
......@@ -1680,16 +1680,6 @@ inline size_t JavaThread::stack_available(address cur_sp) {
return cur_sp > low_addr ? cur_sp - low_addr : 0;
}
// A JavaThread for low memory detection support
class LowMemoryDetectorThread : public JavaThread {
friend class VMStructs;
public:
LowMemoryDetectorThread(ThreadFunction entry_point) : JavaThread(entry_point) {};
// Hide this thread from external view.
bool is_hidden_from_external_view() const { return true; }
};
// A thread used for Compilation.
class CompilerThread : public JavaThread {
friend class VMStructs;
......
......@@ -93,6 +93,7 @@
#include "runtime/java.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/perfMemory.hpp"
#include "runtime/serviceThread.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/virtualspace.hpp"
......@@ -1250,7 +1251,7 @@ static inline uint64_t cast_uint64_t(size_t x)
declare_type(WatcherThread, Thread) \
declare_type(JavaThread, Thread) \
declare_type(JvmtiAgentThread, JavaThread) \
declare_type(LowMemoryDetectorThread, JavaThread) \
declare_type(ServiceThread, JavaThread) \
declare_type(CompilerThread, JavaThread) \
declare_toplevel_type(OSThread) \
declare_toplevel_type(JavaFrameAnchor) \
......
......@@ -59,10 +59,10 @@ class AttachListener: AllStatic {
static void detachall() KERNEL_RETURN;
// indicates if the Attach Listener needs to be created at startup
static bool init_at_startup() KERNEL_RETURN_(return false;);
static bool init_at_startup() KERNEL_RETURN_(false);
// indicates if we have a trigger to start the Attach Listener
static bool is_init_trigger() KERNEL_RETURN_(return false;);
static bool is_init_trigger() KERNEL_RETURN_(false);
#ifdef SERVICES_KERNEL
static bool is_attach_supported() { return false; }
......
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -34,55 +34,11 @@
#include "services/lowMemoryDetector.hpp"
#include "services/management.hpp"
LowMemoryDetectorThread* LowMemoryDetector::_detector_thread = NULL;
volatile bool LowMemoryDetector::_enabled_for_collected_pools = false;
volatile jint LowMemoryDetector::_disabled_count = 0;
void LowMemoryDetector::initialize() {
EXCEPTION_MARK;
instanceKlassHandle klass (THREAD, SystemDictionary::Thread_klass());
instanceHandle thread_oop = klass->allocate_instance_handle(CHECK);
const char thread_name[] = "Low Memory Detector";
Handle string = java_lang_String::create_from_str(thread_name, CHECK);
// Initialize thread_oop to put it into the system threadGroup
Handle thread_group (THREAD, Universe::system_thread_group());
JavaValue result(T_VOID);
JavaCalls::call_special(&result, thread_oop,
klass,
vmSymbols::object_initializer_name(),
vmSymbols::threadgroup_string_void_signature(),
thread_group,
string,
CHECK);
{
MutexLocker mu(Threads_lock);
_detector_thread = new LowMemoryDetectorThread(&low_memory_detector_thread_entry);
// At this point it may be possible that no osthread was created for the
// JavaThread due to lack of memory. We would have to throw an exception
// in that case. However, since this must work and we do not allow
// exceptions anyway, check and abort if this fails.
if (_detector_thread == NULL || _detector_thread->osthread() == NULL) {
vm_exit_during_initialization("java.lang.OutOfMemoryError",
"unable to create new native thread");
}
java_lang_Thread::set_thread(thread_oop(), _detector_thread);
java_lang_Thread::set_priority(thread_oop(), NearMaxPriority);
java_lang_Thread::set_daemon(thread_oop());
_detector_thread->set_threadObj(thread_oop());
Threads::add(_detector_thread);
Thread::start(_detector_thread);
}
}
bool LowMemoryDetector::has_pending_requests() {
assert(LowMemory_lock->owned_by_self(), "Must own LowMemory_lock");
assert(Service_lock->owned_by_self(), "Must own Service_lock");
bool has_requests = false;
int num_memory_pools = MemoryService::num_memory_pools();
for (int i = 0; i < num_memory_pools; i++) {
......@@ -100,47 +56,21 @@ bool LowMemoryDetector::has_pending_requests() {
return has_requests;
}
void LowMemoryDetector::low_memory_detector_thread_entry(JavaThread* jt, TRAPS) {
while (true) {
bool sensors_changed = false;
{
// _no_safepoint_check_flag is used here as LowMemory_lock is a
// special lock and the VMThread may acquire this lock at safepoint.
// Need state transition ThreadBlockInVM so that this thread
// will be handled by safepoint correctly when this thread is
// notified at a safepoint.
// This ThreadBlockInVM object is not also considered to be
// suspend-equivalent because LowMemoryDetector threads are
// not visible to external suspension.
void LowMemoryDetector::process_sensor_changes(TRAPS) {
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
ThreadBlockInVM tbivm(jt);
MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
while (!(sensors_changed = has_pending_requests())) {
// wait until one of the sensors has pending requests
LowMemory_lock->wait(Mutex::_no_safepoint_check_flag);
}
// No need to hold Service_lock to call out to Java
int num_memory_pools = MemoryService::num_memory_pools();
for (int i = 0; i < num_memory_pools; i++) {
MemoryPool* pool = MemoryService::get_memory_pool(i);
SensorInfo* sensor = pool->usage_sensor();
SensorInfo* gc_sensor = pool->gc_usage_sensor();
if (sensor != NULL && sensor->has_pending_requests()) {
sensor->process_pending_requests(CHECK);
}
{
ResourceMark rm(THREAD);
HandleMark hm(THREAD);
// No need to hold LowMemory_lock to call out to Java
int num_memory_pools = MemoryService::num_memory_pools();
for (int i = 0; i < num_memory_pools; i++) {
MemoryPool* pool = MemoryService::get_memory_pool(i);
SensorInfo* sensor = pool->usage_sensor();
SensorInfo* gc_sensor = pool->gc_usage_sensor();
if (sensor != NULL && sensor->has_pending_requests()) {
sensor->process_pending_requests(CHECK);
}
if (gc_sensor != NULL && gc_sensor->has_pending_requests()) {
gc_sensor->process_pending_requests(CHECK);
}
}
if (gc_sensor != NULL && gc_sensor->has_pending_requests()) {
gc_sensor->process_pending_requests(CHECK);
}
}
}
......@@ -148,7 +78,7 @@ void LowMemoryDetector::low_memory_detector_thread_entry(JavaThread* jt, TRAPS)
// This method could be called from any Java threads
// and also VMThread.
void LowMemoryDetector::detect_low_memory() {
MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
bool has_pending_requests = false;
int num_memory_pools = MemoryService::num_memory_pools();
......@@ -166,7 +96,7 @@ void LowMemoryDetector::detect_low_memory() {
}
if (has_pending_requests) {
LowMemory_lock->notify_all();
Service_lock->notify_all();
}
}
......@@ -181,14 +111,14 @@ void LowMemoryDetector::detect_low_memory(MemoryPool* pool) {
}
{
MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MemoryUsage usage = pool->get_memory_usage();
sensor->set_gauge_sensor_level(usage,
pool->usage_threshold());
if (sensor->has_pending_requests()) {
// notify sensor state update
LowMemory_lock->notify_all();
Service_lock->notify_all();
}
}
}
......@@ -203,14 +133,14 @@ void LowMemoryDetector::detect_after_gc_memory(MemoryPool* pool) {
}
{
MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
MemoryUsage usage = pool->get_last_collection_usage();
sensor->set_counter_sensor_level(usage, pool->gc_usage_threshold());
if (sensor->has_pending_requests()) {
// notify sensor state update
LowMemory_lock->notify_all();
Service_lock->notify_all();
}
}
}
......@@ -384,8 +314,8 @@ void SensorInfo::trigger(int count, TRAPS) {
}
{
// Holds LowMemory_lock and update the sensor state
MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
// Holds Service_lock and update the sensor state
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
_sensor_on = true;
_sensor_count += count;
_pending_trigger_count = _pending_trigger_count - count;
......@@ -410,8 +340,8 @@ void SensorInfo::clear(int count, TRAPS) {
}
{
// Holds LowMemory_lock and update the sensor state
MutexLockerEx ml(LowMemory_lock, Mutex::_no_safepoint_check_flag);
// Holds Service_lock and update the sensor state
MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
_sensor_on = false;
_pending_clear_count = 0;
_pending_trigger_count = _pending_trigger_count - count;
......
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -58,8 +58,8 @@
//
// May need to deal with hysteresis effect.
//
// Memory detection code runs in the Service thread (serviceThread.hpp).
class LowMemoryDetectorThread;
class OopClosure;
class MemoryPool;
......@@ -211,23 +211,22 @@ public:
};
class LowMemoryDetector : public AllStatic {
friend class LowMemoryDetectorDisabler;
friend class LowMemoryDetectorDisabler;
friend class ServiceThread;
private:
// true if any collected heap has low memory detection enabled
static volatile bool _enabled_for_collected_pools;
// > 0 if temporarily disabled
static volatile jint _disabled_count;
static LowMemoryDetectorThread* _detector_thread;
static void low_memory_detector_thread_entry(JavaThread* thread, TRAPS);
static void check_memory_usage();
static bool has_pending_requests();
static bool temporary_disabled() { return _disabled_count > 0; }
static void disable() { Atomic::inc(&_disabled_count); }
static void enable() { Atomic::dec(&_disabled_count); }
static void process_sensor_changes(TRAPS);
public:
static void initialize();
static void detect_low_memory();
static void detect_low_memory(MemoryPool* pool);
static void detect_after_gc_memory(MemoryPool* pool);
......@@ -275,7 +274,6 @@ public:
}
}
}
};
class LowMemoryDetectorDisabler: public StackObj {
......
......@@ -38,6 +38,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/os.hpp"
#include "runtime/serviceThread.hpp"
#include "services/classLoadingService.hpp"
#include "services/heapDumper.hpp"
#include "services/lowMemoryDetector.hpp"
......@@ -112,8 +113,8 @@ void Management::init() {
}
void Management::initialize(TRAPS) {
// Start the low memory detector thread
LowMemoryDetector::initialize();
// Start the service thread
ServiceThread::initialize();
if (ManagementServer) {
ResourceMark rm(THREAD);
......
......@@ -46,7 +46,7 @@
#define SERVICES_KERNEL
#define KERNEL_RETURN {}
#define KERNEL_RETURN_(code) { code }
#define KERNEL_RETURN_(code) { return code; }
#else // KERNEL
......
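The companion change at the bottom of the patch moves the return keyword into the KERNEL_RETURN_ macro itself, so kernel-build stub declarations pass just the value (false, NULL, 0, JvmtiDeferredEvent()) instead of a full return statement. A simplified, self-contained illustration of the before/after expansion on the kernel side (the #ifdef plumbing and the non-kernel definitions of the real header are omitted):

// Old form: the call site had to supply a complete statement.
#define OLD_KERNEL_RETURN_(code) { code }
// New form: the call site passes just the value and the macro adds "return".
#define NEW_KERNEL_RETURN_(code) { return code; }

struct AttachListenerSketch {
  // Previously written as:  static bool is_init_trigger() OLD_KERNEL_RETURN_(return false;);
  // Now written as below, expanding to "static bool is_init_trigger() { return false; }".
  static bool is_init_trigger() NEW_KERNEL_RETURN_(false);
};

This is why every stub in the patch changes from KERNEL_RETURN_(return X;) to KERNEL_RETURN_(X), including the new JvmtiDeferredEvent factories, which return a default-constructed event in kernel builds.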