Commit 20abc2c5 authored by: C coleenp

8059100: SIGSEGV VirtualMemoryTracker::remove_released_region

Summary: Disallow increasing native memory tracking level
Reviewed-by: hseigel, ctornqvi, gtriantafill
Parent commit: b26919da
...@@ -331,7 +331,36 @@ WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num)) ...@@ -331,7 +331,36 @@ WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
} }
WB_END WB_END
WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
  // Whitebox test hook: verify that NMT tracking levels can be lowered
  // but never raised again once lowered.
  if (MemTracker::tracking_level() != NMT_off) {
    assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking");
    // detail -> summary is a legal downgrade.
    MemTracker::transition_to(NMT_summary);
    assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now");
    // summary -> detail would be an upgrade; the request must be ignored.
    MemTracker::transition_to(NMT_detail);
    assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now");
    // Shutdown lowers the tracking level to minimal.
    MemTracker::shutdown();
    assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now");
    // minimal -> summary is ignored rather than asserted: when the malloc
    // site table overflows in another thread, that thread legitimately
    // requests a transition to summary.
    MemTracker::transition_to(NMT_summary);
    assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
    // minimal -> detail must likewise be ignored.
    MemTracker::transition_to(NMT_detail);
    assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
    return MemTracker::tracking_level() == NMT_minimal;
  } else {
    // NMT is off: transitioning to off is a no-op and must leave it off.
    MemTracker::transition_to(NMT_off);
    return MemTracker::tracking_level() == NMT_off;
  }
WB_END
#endif // INCLUDE_NMT #endif // INCLUDE_NMT
static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) { static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
...@@ -936,6 +965,7 @@ static JNINativeMethod methods[] = { ...@@ -936,6 +965,7 @@ static JNINativeMethod methods[] = {
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory }, {CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket}, {CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported}, {CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
#endif // INCLUDE_NMT #endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll }, {CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
{CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I", {CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
......
...@@ -51,14 +51,6 @@ size_t MallocMemorySnapshot::total_arena() const { ...@@ -51,14 +51,6 @@ size_t MallocMemorySnapshot::total_arena() const {
return amount; return amount;
} }
// Zero out the snapshot: clear the tracking-header counter, then every
// per-type malloc counter.
void MallocMemorySnapshot::reset() {
  _tracking_header.reset();
  for (int i = 0; i < mt_number_of_types; i++) {
    _malloc[i].reset();
  }
}
// Make adjustment by subtracting chunks used by arenas // Make adjustment by subtracting chunks used by arenas
// from total chunks to get total free chunck size // from total chunks to get total free chunck size
void MallocMemorySnapshot::make_adjustment() { void MallocMemorySnapshot::make_adjustment() {
...@@ -116,14 +108,9 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) { ...@@ -116,14 +108,9 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) {
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
assert(from != NMT_off, "Can not transition from off state"); assert(from != NMT_off, "Can not transition from off state");
assert(to != NMT_off, "Can not transition to off state"); assert(to != NMT_off, "Can not transition to off state");
if (from == NMT_minimal) { assert (from != NMT_minimal, "cannot transition from minimal state");
MallocMemorySummary::reset();
}
if (to == NMT_detail) { if (from == NMT_detail) {
assert(from == NMT_minimal || from == NMT_summary, "Just check");
return MallocSiteTable::initialize();
} else if (from == NMT_detail) {
assert(to == NMT_minimal || to == NMT_summary, "Just check"); assert(to == NMT_minimal || to == NMT_summary, "Just check");
MallocSiteTable::shutdown(); MallocSiteTable::shutdown();
} }
......
...@@ -51,14 +51,6 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC { ...@@ -51,14 +51,6 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {
DEBUG_ONLY(_peak_size = 0;) DEBUG_ONLY(_peak_size = 0;)
} }
// Reset counters
void reset() {
_size = 0;
_count = 0;
DEBUG_ONLY(_peak_size = 0;)
DEBUG_ONLY(_peak_count = 0;)
}
inline void allocate(size_t sz) { inline void allocate(size_t sz) {
Atomic::add(1, (volatile MemoryCounterType*)&_count); Atomic::add(1, (volatile MemoryCounterType*)&_count);
if (sz > 0) { if (sz > 0) {
...@@ -124,11 +116,6 @@ class MallocMemory VALUE_OBJ_CLASS_SPEC { ...@@ -124,11 +116,6 @@ class MallocMemory VALUE_OBJ_CLASS_SPEC {
_arena.resize(sz); _arena.resize(sz);
} }
// Reset both the arena counter and the malloc counter to zero.
void reset() {
  _arena.reset();
  _malloc.reset();
}
inline size_t malloc_size() const { return _malloc.size(); } inline size_t malloc_size() const { return _malloc.size(); }
inline size_t malloc_count() const { return _malloc.count();} inline size_t malloc_count() const { return _malloc.count();}
inline size_t arena_size() const { return _arena.size(); } inline size_t arena_size() const { return _arena.size(); }
...@@ -176,8 +163,6 @@ class MallocMemorySnapshot : public ResourceObj { ...@@ -176,8 +163,6 @@ class MallocMemorySnapshot : public ResourceObj {
return s->by_type(mtThreadStack)->malloc_count(); return s->by_type(mtThreadStack)->malloc_count();
} }
void reset();
void copy_to(MallocMemorySnapshot* s) { void copy_to(MallocMemorySnapshot* s) {
s->_tracking_header = _tracking_header; s->_tracking_header = _tracking_header;
for (int index = 0; index < mt_number_of_types; index ++) { for (int index = 0; index < mt_number_of_types; index ++) {
...@@ -240,11 +225,6 @@ class MallocMemorySummary : AllStatic { ...@@ -240,11 +225,6 @@ class MallocMemorySummary : AllStatic {
return as_snapshot()->malloc_overhead()->size(); return as_snapshot()->malloc_overhead()->size();
} }
// Reset all counters to zero
static void reset() {
as_snapshot()->reset();
}
static MallocMemorySnapshot* as_snapshot() { static MallocMemorySnapshot* as_snapshot() {
return (MallocMemorySnapshot*)_snapshot; return (MallocMemorySnapshot*)_snapshot;
} }
......
...@@ -90,10 +90,6 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC { ...@@ -90,10 +90,6 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
_class_count(0) { _class_count(0) {
} }
// Destructor: delegates to reset(), which clears the baselined state
// (baseline type, class count, and allocation-site lists).
~MemBaseline() {
reset();
}
bool baseline(bool summaryOnly = true); bool baseline(bool summaryOnly = true);
BaselineType baseline_type() const { return _baseline_type; } BaselineType baseline_type() const { return _baseline_type; }
...@@ -169,8 +165,7 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC { ...@@ -169,8 +165,7 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
// reset the baseline for reuse // reset the baseline for reuse
void reset() { void reset() {
_baseline_type = Not_baselined; _baseline_type = Not_baselined;
_malloc_memory_snapshot.reset(); // _malloc_memory_snapshot and _virtual_memory_snapshot are copied over.
_virtual_memory_snapshot.reset();
_class_count = 0; _class_count = 0;
_malloc_sites.clear(); _malloc_sites.clear();
......
...@@ -96,20 +96,6 @@ class MemSummaryReporter : public MemReporterBase { ...@@ -96,20 +96,6 @@ class MemSummaryReporter : public MemReporterBase {
size_t _class_count; size_t _class_count;
public: public:
// Report summary tracking data from global snapshots directly.
// This constructor is used for final reporting and hs_err reporting.
MemSummaryReporter(MallocMemorySnapshot* malloc_snapshot,
                   VirtualMemorySnapshot* vm_snapshot, outputStream* output,
                   size_t class_count = 0, size_t scale = K) :
    MemReporterBase(output, scale),
    _malloc_snapshot(malloc_snapshot),
    _vm_snapshot(vm_snapshot) {
  // A class_count of zero means "ask the VM for the live class count".
  _class_count = (class_count == 0)
      ? InstanceKlass::number_of_instance_classes()
      : class_count;
}
// This constructor is for normal reporting from a recent baseline. // This constructor is for normal reporting from a recent baseline.
MemSummaryReporter(MemBaseline& baseline, outputStream* output, MemSummaryReporter(MemBaseline& baseline, outputStream* output,
size_t scale = K) : MemReporterBase(output, scale), size_t scale = K) : MemReporterBase(output, scale),
......
...@@ -144,11 +144,9 @@ void Tracker::record(address addr, size_t size) { ...@@ -144,11 +144,9 @@ void Tracker::record(address addr, size_t size) {
} }
// Shutdown can only be issued via JCmd, and NMT JCmd is serialized // Shutdown can only be issued via JCmd, and NMT JCmd is serialized by lock
// by lock
void MemTracker::shutdown() { void MemTracker::shutdown() {
// We can only shutdown NMT to minimal tracking level if it is // We can only shutdown NMT to minimal tracking level if it is ever on.
// ever on.
if (tracking_level () > NMT_minimal) { if (tracking_level () > NMT_minimal) {
transition_to(NMT_minimal); transition_to(NMT_minimal);
} }
...@@ -157,45 +155,36 @@ void MemTracker::shutdown() { ...@@ -157,45 +155,36 @@ void MemTracker::shutdown() {
bool MemTracker::transition_to(NMT_TrackingLevel level) { bool MemTracker::transition_to(NMT_TrackingLevel level) {
NMT_TrackingLevel current_level = tracking_level(); NMT_TrackingLevel current_level = tracking_level();
assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");
if (current_level == level) { if (current_level == level) {
return true; return true;
} else if (current_level > level) { } else if (current_level > level) {
// Downgrade tracking level, we want to lower the tracking // Downgrade tracking level, we want to lower the tracking level first
// level first
_tracking_level = level; _tracking_level = level;
// Make _tracking_level visible immediately. // Make _tracking_level visible immediately.
OrderAccess::fence(); OrderAccess::fence();
VirtualMemoryTracker::transition(current_level, level); VirtualMemoryTracker::transition(current_level, level);
MallocTracker::transition(current_level, level); MallocTracker::transition(current_level, level);
if (level == NMT_minimal) _baseline.reset();
} else { } else {
VirtualMemoryTracker::transition(current_level, level); // Upgrading tracking level is not supported and has never been supported.
MallocTracker::transition(current_level, level); // Allocating and deallocating malloc tracking structures is not thread safe and
// leads to inconsistencies unless a lot coarser locks are added.
_tracking_level = level;
// Make _tracking_level visible immediately.
OrderAccess::fence();
} }
return true; return true;
} }
void MemTracker::final_report(outputStream* output) { void MemTracker::report(bool summary_only, outputStream* output) {
assert(output != NULL, "No output stream"); assert(output != NULL, "No output stream");
if (tracking_level() >= NMT_summary) { MemBaseline baseline;
MallocMemorySnapshot* malloc_memory_snapshot = if (baseline.baseline(summary_only)) {
MallocMemorySummary::as_snapshot(); if (summary_only) {
malloc_memory_snapshot->make_adjustment(); MemSummaryReporter rpt(baseline, output);
rpt.report();
VirtualMemorySnapshot* virtual_memory_snapshot = } else {
VirtualMemorySummary::as_snapshot(); MemDetailReporter rpt(baseline, output);
rpt.report();
MemSummaryReporter rptr(malloc_memory_snapshot, }
virtual_memory_snapshot, output);
rptr.report();
// shutdown NMT, the data no longer accurate
shutdown();
} }
} }
......
...@@ -70,6 +70,7 @@ class MemTracker : AllStatic { ...@@ -70,6 +70,7 @@ class MemTracker : AllStatic {
static inline void release_thread_stack(void* addr, size_t size) { } static inline void release_thread_stack(void* addr, size_t size) { }
static void final_report(outputStream*) { } static void final_report(outputStream*) { }
static void error_report(outputStream*) { }
}; };
#else #else
...@@ -270,13 +271,20 @@ class MemTracker : AllStatic { ...@@ -270,13 +271,20 @@ class MemTracker : AllStatic {
// other tools. // other tools.
static inline Mutex* query_lock() { return _query_lock; } static inline Mutex* query_lock() { return _query_lock; }
// Make a final report and shutdown. // Make a final report or report for hs_err file.
// This function generates summary report without creating snapshots, static void error_report(outputStream* output) {
// to avoid additional memory allocation. It uses native memory summary if (tracking_level() >= NMT_summary) {
// counters, and makes adjustment to them, once the adjustment is made, report(true, output); // just print summary for error case.
// the counters are no longer accurate. As the result, this function }
// should only be used for final reporting before shutting down. }
static void final_report(outputStream*);
static void final_report(outputStream* output) {
NMT_TrackingLevel level = tracking_level();
if (level >= NMT_summary) {
report(level == NMT_summary, output);
}
}
// Stored baseline // Stored baseline
static inline MemBaseline& get_baseline() { static inline MemBaseline& get_baseline() {
...@@ -291,6 +299,7 @@ class MemTracker : AllStatic { ...@@ -291,6 +299,7 @@ class MemTracker : AllStatic {
private: private:
static NMT_TrackingLevel init_tracking_level(); static NMT_TrackingLevel init_tracking_level();
static void report(bool summary_only, outputStream* output);
private: private:
// Tracking level // Tracking level
......
...@@ -443,26 +443,28 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) { ...@@ -443,26 +443,28 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) { bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
assert(_reserved_regions != NULL, "Sanity check"); assert(_reserved_regions != NULL, "Sanity check");
ThreadCritical tc; ThreadCritical tc;
LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head(); // Check that the _reserved_regions haven't been deleted.
while (head != NULL) { if (_reserved_regions != NULL) {
const ReservedMemoryRegion* rgn = head->peek(); LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
if (!walker->do_allocation_site(rgn)) { while (head != NULL) {
return false; const ReservedMemoryRegion* rgn = head->peek();
if (!walker->do_allocation_site(rgn)) {
return false;
}
head = head->next();
} }
head = head->next(); }
}
return true; return true;
} }
// Transition virtual memory tracking level. // Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) { bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
if (from == NMT_minimal) { assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
assert(to == NMT_summary || to == NMT_detail, "Just check"); if (to == NMT_minimal) {
VirtualMemorySummary::reset();
} else if (to == NMT_minimal) {
assert(from == NMT_summary || from == NMT_detail, "Just check"); assert(from == NMT_summary || from == NMT_detail, "Just check");
// Clean up virtual memory tracking data structures. // Clean up virtual memory tracking data structures.
ThreadCritical tc; ThreadCritical tc;
// Check for potential race with other thread calling transition
if (_reserved_regions != NULL) { if (_reserved_regions != NULL) {
delete _reserved_regions; delete _reserved_regions;
_reserved_regions = NULL; _reserved_regions = NULL;
......
...@@ -62,11 +62,6 @@ class VirtualMemory VALUE_OBJ_CLASS_SPEC { ...@@ -62,11 +62,6 @@ class VirtualMemory VALUE_OBJ_CLASS_SPEC {
_committed -= sz; _committed -= sz;
} }
// Drop both the reserved and committed counters back to zero.
void reset() {
  _committed = 0;
  _reserved = 0;
}
inline size_t reserved() const { return _reserved; } inline size_t reserved() const { return _reserved; }
inline size_t committed() const { return _committed; } inline size_t committed() const { return _committed; }
}; };
...@@ -123,12 +118,6 @@ class VirtualMemorySnapshot : public ResourceObj { ...@@ -123,12 +118,6 @@ class VirtualMemorySnapshot : public ResourceObj {
return amount; return amount;
} }
// Clear every per-type virtual memory counter in this snapshot.
inline void reset() {
  for (int i = 0; i < mt_number_of_types; i++) {
    _virtual_memory[i].reset();
  }
}
void copy_to(VirtualMemorySnapshot* s) { void copy_to(VirtualMemorySnapshot* s) {
for (int index = 0; index < mt_number_of_types; index ++) { for (int index = 0; index < mt_number_of_types; index ++) {
s->_virtual_memory[index] = _virtual_memory[index]; s->_virtual_memory[index] = _virtual_memory[index];
...@@ -174,10 +163,6 @@ class VirtualMemorySummary : AllStatic { ...@@ -174,10 +163,6 @@ class VirtualMemorySummary : AllStatic {
as_snapshot()->copy_to(s); as_snapshot()->copy_to(s);
} }
static inline void reset() {
as_snapshot()->reset();
}
static VirtualMemorySnapshot* as_snapshot() { static VirtualMemorySnapshot* as_snapshot() {
return (VirtualMemorySnapshot*)_snapshot; return (VirtualMemorySnapshot*)_snapshot;
} }
......
...@@ -774,7 +774,7 @@ void VMError::report(outputStream* st) { ...@@ -774,7 +774,7 @@ void VMError::report(outputStream* st) {
STEP(228, "(Native Memory Tracking)" ) STEP(228, "(Native Memory Tracking)" )
if (_verbose) { if (_verbose) {
MemTracker::final_report(st); MemTracker::error_report(st);
} }
STEP(230, "" ) STEP(230, "" )
......
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8059100
* @summary Test that you can decrease NMT tracking level but not increase it.
* @key nmt
* @library /testlibrary /testlibrary/whitebox
* @build ChangeTrackingLevel
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ChangeTrackingLevel
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
public class ChangeTrackingLevel {
    public static WhiteBox wb = WhiteBox.getWhiteBox();

    public static void main(String args[]) throws Exception {
        // The whitebox call exercises all legal/illegal NMT level
        // transitions inside the VM and reports overall success.
        if (wb.NMTChangeTrackingLevel()) {
            System.out.println("NMT level change test passed.");
        } else {
            // it also fails if the VM asserts.
            throw new RuntimeException("NMT level change test failed");
        }
    }
}
...@@ -101,6 +101,7 @@ public class WhiteBox { ...@@ -101,6 +101,7 @@ public class WhiteBox {
public native void NMTOverflowHashBucket(long num); public native void NMTOverflowHashBucket(long num);
public native long NMTMallocWithPseudoStack(long size, int index); public native long NMTMallocWithPseudoStack(long size, int index);
public native boolean NMTIsDetailSupported(); public native boolean NMTIsDetailSupported();
public native boolean NMTChangeTrackingLevel();
// Compiler // Compiler
public native void deoptimizeAll(); public native void deoptimizeAll();
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册