Commit 40934fb4 authored by sla

Merge

......@@ -59,6 +59,7 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
public boolean doObj(Oop oop) {
try {
writeHeapRecordPrologue();
if (oop instanceof TypeArray) {
writePrimitiveArray((TypeArray)oop);
} else if (oop instanceof ObjArray) {
......@@ -97,6 +98,7 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
// not-a-Java-visible oop
writeInternalObject(oop);
}
writeHeapRecordEpilogue();
} catch (IOException exp) {
throw new RuntimeException(exp);
}
......@@ -416,6 +418,12 @@ public abstract class AbstractHeapGraphWriter implements HeapGraphWriter {
protected void writeHeapFooter() throws IOException {
}
protected void writeHeapRecordPrologue() throws IOException {
}
protected void writeHeapRecordEpilogue() throws IOException {
}
// HeapVisitor, OopVisitor methods can't throw any non-runtime
// exception. But, derived class write methods (which are called
// from visitor callbacks) may throw IOException. Hence, we throw
......
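The new writeHeapRecordEpilogue() hook mirrors the existing writeHeapRecordPrologue(): the abstract writer brackets every heap record so that a subclass can open and close dump segments without the traversal code knowing about them. A minimal sketch of this template-method shape, with illustrative names rather than the actual SA types:

// Minimal sketch of the prologue/epilogue hook pattern; names are illustrative.
import java.io.IOException;

abstract class RecordWriter {
    // Subclasses may override these to frame each record (e.g. open/close a segment).
    protected void writeRecordPrologue() throws IOException { }
    protected void writeRecordEpilogue() throws IOException { }

    protected abstract void emitRecordBody(Object obj) throws IOException;

    // Visitor callbacks cannot throw checked exceptions, so IOException is wrapped.
    public final void writeRecord(Object obj) {
        try {
            writeRecordPrologue();
            emitRecordBody(obj);
            writeRecordEpilogue();
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
}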
......@@ -44,7 +44,7 @@ import sun.jvm.hotspot.runtime.*;
* WARNING: This format is still under development, and is subject to
* change without notice.
*
* header "JAVA PROFILE 1.0.1" (0-terminated)
* header "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2" (0-terminated)
* u4 size of identifiers. Identifiers are used to represent
* UTF8 strings, objects, stack traces, etc. They usually
* have the same size as host pointers. For example, on
......@@ -292,11 +292,34 @@ import sun.jvm.hotspot.runtime.*;
* 0x00000002: cpu sampling on/off
* u2 stack trace depth
*
*
* When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
* be generated as a sequence of heap dump segments. This sequence is
* terminated by an end record. The additional tags allowed by format
* "JAVA PROFILE 1.0.2" are:
*
* HPROF_HEAP_DUMP_SEGMENT denote a heap dump segment
*
* [heap dump sub-records]*
* The same sub-record types allowed by HPROF_HEAP_DUMP
*
* HPROF_HEAP_DUMP_END denotes the end of a heap dump
*
*/
public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// The heap size threshold used to determine if segmented format
// ("JAVA PROFILE 1.0.2") should be used.
private static final long HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD = 2L * 0x40000000;
// The approximate size of a heap segment. Used to calculate when to create
// a new segment.
private static final long HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE = 1L * 0x40000000;
// hprof binary file header
private static final String HPROF_HEADER = "JAVA PROFILE 1.0.1";
private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
// constants in enum HprofTag
private static final int HPROF_UTF8 = 0x01;
......@@ -312,6 +335,10 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static final int HPROF_CPU_SAMPLES = 0x0D;
private static final int HPROF_CONTROL_SETTINGS = 0x0E;
// 1.0.2 record types
private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
private static final int HPROF_HEAP_DUMP_END = 0x2C;
// Heap dump constants
// constants in enum HprofGcTag
private static final int HPROF_GC_ROOT_UNKNOWN = 0xFF;
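Every top-level hprof record shares the same framing: a u1 tag, a u4 timestamp and a u4 body length, followed by the body. The 1.0.2 format only adds the two tags above; HPROF_HEAP_DUMP_SEGMENT (0x1C) carries the same sub-records as HPROF_HEAP_DUMP (0x0C), and HPROF_HEAP_DUMP_END (0x2C) has an empty body. A rough sketch of a consumer walking that framing (it assumes the file header has already been read; this is not the actual jhat/SA parser):

// Rough sketch: iterate top-level hprof records once the header has been consumed.
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

class HprofRecordWalker {
    static void walk(DataInputStream in) throws IOException {
        int tag;
        while ((tag = in.read()) != -1) {
            in.readInt();                                // u4 timestamp (usually 0)
            long length = in.readInt() & 0xFFFFFFFFL;    // u4 body length, unsigned
            if (tag == 0x2C) {                           // HPROF_HEAP_DUMP_END: empty body
                break;
            }
            // 0x1C (HPROF_HEAP_DUMP_SEGMENT) holds the same sub-records as 0x0C (HPROF_HEAP_DUMP)
            long skipped = 0;
            while (skipped < length) {
                long n = in.skip(length - skipped);
                if (n <= 0) throw new EOFException("truncated record body");
                skipped += n;
            }
        }
    }
}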
......@@ -352,11 +379,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static final int JVM_SIGNATURE_ARRAY = '[';
private static final int JVM_SIGNATURE_CLASS = 'L';
public synchronized void write(String fileName) throws IOException {
// open file stream and create buffered data output stream
FileOutputStream fos = new FileOutputStream(fileName);
FileChannel chn = fos.getChannel();
fos = new FileOutputStream(fileName);
out = new DataOutputStream(new BufferedOutputStream(fos));
VM vm = VM.getVM();
......@@ -385,6 +410,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
FLOAT_SIZE = objectHeap.getFloatSize();
DOUBLE_SIZE = objectHeap.getDoubleSize();
// Check whether we should dump the heap as segments
useSegmentedHeapDump = vm.getUniverse().heap().used() > HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD;
// hprof bin format header
writeFileHeader();
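The 2 GB threshold appears deliberately conservative: a single HPROF_HEAP_DUMP record stores its body length in an unsigned 32-bit field, so a dump approaching 4 GB cannot be expressed in the 1.0.1 format, and the used-heap figure is only an estimate of the eventual dump size. A tiny sketch of the decision (usedBytes stands in for vm.getUniverse().heap().used()):

// Sketch of the format decision; usedBytes stands in for vm.getUniverse().heap().used().
class DumpFormatChooser {
    static final long SEGMENT_THRESHOLD = 2L * 0x40000000L;  // 2 GB

    static boolean useSegmentedDump(long usedBytes) {
        return usedBytes > SEGMENT_THRESHOLD;  // use the segmented 1.0.2 format above the threshold
    }
}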
......@@ -394,38 +422,87 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// hprof UTF-8 symbols section
writeSymbols();
// HPROF_LOAD_CLASS records for all classes
writeClasses();
// write heap data now
out.writeByte((byte)HPROF_HEAP_DUMP);
out.writeInt(0); // relative timestamp
// remember position of dump length, we will fixup
// length later - hprof format requires length.
out.flush();
long dumpStart = chn.position();
// write dummy length of 0 and we'll fix it later.
out.writeInt(0);
// write CLASS_DUMP records
writeClassDumpRecords();
// this will write heap data into the buffer stream
super.write();
// flush buffer stream.
out.flush();
// Fill in final length
fillInHeapRecordLength();
if (useSegmentedHeapDump) {
// Write heap segment-end record
out.writeByte((byte) HPROF_HEAP_DUMP_END);
out.writeInt(0);
out.writeInt(0);
}
// flush buffer stream and discard it.
out.flush();
out = null;
// close the file stream
fos.close();
}
@Override
protected void writeHeapRecordPrologue() throws IOException {
if (currentSegmentStart == 0) {
// write heap data header, depending on heap size use segmented heap
// format
out.writeByte((byte) (useSegmentedHeapDump ? HPROF_HEAP_DUMP_SEGMENT
: HPROF_HEAP_DUMP));
out.writeInt(0);
// remember position of dump length, we will fixup
// length later - hprof format requires length.
out.flush();
currentSegmentStart = fos.getChannel().position();
// write dummy length of 0 and we'll fix it later.
out.writeInt(0);
}
}
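The prologue opens a record lazily: only when no segment is in progress (currentSegmentStart == 0) does it emit the tag and timestamp, flush so the channel position is accurate, remember where the u4 length field sits, and write a placeholder length. A standalone sketch of that sequence, with hypothetical parameters:

// Sketch of opening a heap-dump record; returns the offset of the u4 length placeholder.
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.channels.FileChannel;

class SegmentOpener {
    static long openRecord(DataOutputStream out, FileChannel ch, boolean segmented)
            throws IOException {
        out.writeByte(segmented ? 0x1C : 0x0C);  // HPROF_HEAP_DUMP_SEGMENT or HPROF_HEAP_DUMP
        out.writeInt(0);                         // u4 relative timestamp
        out.flush();                             // make the channel position reflect what was written
        long lengthFieldOffset = ch.position();  // the u4 length field starts here
        out.writeInt(0);                         // dummy length, patched once the body is written
        return lengthFieldOffset;
    }
}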
@Override
protected void writeHeapRecordEpilogue() throws IOException {
if (useSegmentedHeapDump) {
out.flush();
if ((fos.getChannel().position() - currentSegmentStart - 4) >= HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE) {
fillInHeapRecordLength();
currentSegmentStart = 0;
}
}
}
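The epilogue only matters in segmented mode: after each object it flushes and, once the open segment has grown past roughly 1 GB, patches the segment length and resets currentSegmentStart so the next prologue starts a fresh HPROF_HEAP_DUMP_SEGMENT. The rotation check in isolation (a sketch, with byte offsets passed as parameters):

// Sketch of the segment-rotation check; both arguments are byte offsets in the output file.
class SegmentRotation {
    static final long SEGMENT_SIZE = 1L * 0x40000000L;  // ~1 GB per segment

    // segmentStart is the offset of the u4 length field written by the prologue.
    static boolean segmentIsFull(long currentPosition, long segmentStart) {
        long bodyBytes = currentPosition - segmentStart - 4;  // exclude the length field itself
        return bodyBytes >= SEGMENT_SIZE;
    }
}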
private void fillInHeapRecordLength() throws IOException {
// now get current position to calculate length
long dumpEnd = chn.position();
long dumpEnd = fos.getChannel().position();
// calculate length of heap data
int dumpLen = (int) (dumpEnd - dumpStart - 4);
long dumpLenLong = (dumpEnd - currentSegmentStart - 4L);
// Check length boundary, overflow could happen but is _very_ unlikely
if (dumpLenLong >= (4L * 0x40000000)) {
throw new RuntimeException("Heap segment size overflow.");
}
// Save the current position
long currentPosition = fos.getChannel().position();
// seek the position to write length
chn.position(dumpStart);
fos.getChannel().position(currentSegmentStart);
int dumpLen = (int) dumpLenLong;
// write length as integer
fos.write((dumpLen >>> 24) & 0xFF);
......@@ -433,8 +510,8 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
fos.write((dumpLen >>> 8) & 0xFF);
fos.write((dumpLen >>> 0) & 0xFF);
// close the file stream
fos.close();
// Reset to the previously saved position
fos.getChannel().position(currentPosition);
}
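fillInHeapRecordLength() is the back-patch half of that handshake: it computes the body length from the current position, checks that it still fits in an unsigned u4, seeks the channel back to the placeholder, writes the big-endian length, and restores the position. The same idiom in a self-contained sketch, using RandomAccessFile purely for illustration:

// Sketch of the write-placeholder-then-patch idiom used for the u4 record length.
import java.io.IOException;
import java.io.RandomAccessFile;

class LengthPatcher {
    static void patchLength(RandomAccessFile raf, long lengthFieldOffset) throws IOException {
        long end = raf.getFilePointer();            // current end of the record body
        long bodyLen = end - lengthFieldOffset - 4; // exclude the u4 length field itself
        if (bodyLen >= 4L * 0x40000000L) {          // would overflow an unsigned 32-bit length
            throw new IOException("record body too large for a u4 length");
        }
        raf.seek(lengthFieldOffset);
        raf.writeInt((int) bodyLen);                // big-endian u4, as hprof requires
        raf.seek(end);                              // restore the position for further writes
    }
}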
private void writeClassDumpRecords() throws IOException {
......@@ -443,7 +520,9 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
sysDict.allClassesDo(new SystemDictionary.ClassVisitor() {
public void visit(Klass k) {
try {
writeHeapRecordPrologue();
writeClassDumpRecord(k);
writeHeapRecordEpilogue();
} catch (IOException e) {
throw new RuntimeException(e);
}
......@@ -884,7 +963,12 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// writes hprof binary file header
private void writeFileHeader() throws IOException {
// version string
out.writeBytes(HPROF_HEADER);
if (useSegmentedHeapDump) {
out.writeBytes(HPROF_HEADER_1_0_2);
} else {
out.writeBytes(HPROF_HEADER_1_0_1);
}
out.writeByte((byte)'\0');
// write identifier size. we use pointers as identifiers.
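Apart from the heap-dump record tags, the only difference between the two formats is the version string in the file header, which is followed by a u4 identifier size (the pointer size of the target VM) and a u8 timestamp in milliseconds. A sketch of that layout (segmented and idSize are supplied by the caller):

// Sketch of the hprof file header; segmented and idSize are supplied by the caller.
import java.io.DataOutputStream;
import java.io.IOException;

class HprofHeader {
    static void write(DataOutputStream out, boolean segmented, int idSize) throws IOException {
        out.writeBytes(segmented ? "JAVA PROFILE 1.0.2" : "JAVA PROFILE 1.0.1");
        out.writeByte(0);                            // NUL terminator for the version string
        out.writeInt(idSize);                        // u4: identifier size (usually pointer size)
        out.writeLong(System.currentTimeMillis());   // u8: dump timestamp in milliseconds
    }
}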
......@@ -976,6 +1060,7 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static final int EMPTY_FRAME_DEPTH = -1;
private DataOutputStream out;
private FileOutputStream fos;
private Debugger dbg;
private ObjectHeap objectHeap;
private SymbolTable symTbl;
......@@ -983,6 +1068,10 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
// oopSize of the debuggee
private int OBJ_ID_SIZE;
// Added for hprof file format 1.0.2 support
private boolean useSegmentedHeapDump;
private long currentSegmentStart;
private long BOOLEAN_BASE_OFFSET;
private long BYTE_BASE_OFFSET;
private long CHAR_BASE_OFFSET;
......@@ -1005,6 +1094,7 @@ public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
private static class ClassData {
int instSize;
List fields;
ClassData(int instSize, List fields) {
this.instSize = instSize;
this.fields = fields;
......
......@@ -84,7 +84,9 @@ needs_jdk = \
runtime/NMT/VirtualAllocTestType.java \
runtime/RedefineObject/TestRedefineObject.java \
runtime/XCheckJniJsig/XCheckJSig.java \
serviceability/attach/AttachWithStalePidFile.java
serviceability/attach/AttachWithStalePidFile.java \
serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java
# JRE adds further tests to compact3
#
......
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.lang.management.ManagementFactory;
import java.lang.management.RuntimeMXBean;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.util.ArrayList;
import java.util.List;
import sun.management.VMManagement;
public class JMapHProfLargeHeapProc {
private static final List<byte[]> heapGarbage = new ArrayList<>();
public static void main(String[] args) throws Exception {
buildLargeHeap(args);
// Print our pid on stdout
System.out.println("PID[" + getProcessId() + "]");
// Wait for input before termination
System.in.read();
}
private static void buildLargeHeap(String[] args) {
for (long i = 0; i < Integer.parseInt(args[0]); i++) {
heapGarbage.add(new byte[1024]);
}
}
public static int getProcessId() throws Exception {
// Get the current process id using a reflection hack
RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
Field jvm = runtime.getClass().getDeclaredField("jvm");
jvm.setAccessible(true);
VMManagement mgmt = (sun.management.VMManagement) jvm.get(runtime);
Method pid_method = mgmt.getClass().getDeclaredMethod("getProcessId");
pid_method.setAccessible(true);
int pid = (Integer) pid_method.invoke(mgmt);
return pid;
}
}
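The child process digs its pid out of sun.management.VMManagement by reflection because the JDKs this test targets expose no public API for it. On JDK 9 and later the same information is available directly; a hedged alternative for newer runtimes (not what this test uses):

// Alternative pid lookup on JDK 9+; the test above targets older releases and
// therefore relies on the VMManagement reflection hack instead.
class CurrentPid {
    static long get() {
        return ProcessHandle.current().pid();
    }
}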
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.io.BufferedReader;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.Reader;
import java.nio.CharBuffer;
import java.util.Arrays;
import java.util.Scanner;
import com.oracle.java.testlibrary.Asserts;
import com.oracle.java.testlibrary.JDKToolFinder;
import com.oracle.java.testlibrary.JDKToolLauncher;
import com.oracle.java.testlibrary.OutputAnalyzer;
import com.oracle.java.testlibrary.Platform;
import com.oracle.java.testlibrary.ProcessTools;
/*
* @test
* @bug 6313383
* @key regression
* @summary Regression test for hprof export issue due to large heaps (>2G)
* @library /testlibrary
* @compile JMapHProfLargeHeapProc.java
* @run main JMapHProfLargeHeapTest
*/
public class JMapHProfLargeHeapTest {
private static final String HEAP_DUMP_FILE_NAME = "heap.hprof";
private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
private static final long M = 1024L;
private static final long G = 1024L * M;
public static void main(String[] args) throws Exception {
// If we are on MacOSX, test whether the jmap tool is signed; otherwise return,
// since the test will fail with a privilege error.
if (Platform.isOSX()) {
String jmapToolPath = JDKToolFinder.getCurrentJDKTool("jmap");
ProcessBuilder codesignProcessBuilder = new ProcessBuilder(
"codesign", "-v", jmapToolPath);
Process codesignProcess = codesignProcessBuilder.start();
OutputAnalyzer analyser = new OutputAnalyzer(codesignProcess);
try {
analyser.shouldNotContain("code object is not signed at all");
System.out.println("Signed jmap found at: " + jmapToolPath);
} catch (Exception e) {
// Abort since we can't know if the test will work
System.out
.println("Test aborted since we are on MacOSX and the jmap tool is not signed.");
return;
}
}
// Small heap 22 megabytes, should create 1.0.1 file format
testHProfFileFormat("-Xmx1g", 22 * M, HPROF_HEADER_1_0_1);
/**
* This test case is deliberately commented out because the test system cannot
* reliably provide the heap sizes it requires. If or when it becomes possible
* to run this kind of test in the test environment, it should be enabled again.
*/
// Large heap 2.2 gigabytes, should create 1.0.2 file format
// testHProfFileFormat("-Xmx4g", 2 * G + 2 * M, HPROF_HEADER_1_0_2);
}
private static void testHProfFileFormat(String vmArgs, long heapSize,
String expectedFormat) throws Exception, IOException,
InterruptedException, FileNotFoundException {
ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(
vmArgs, "JMapHProfLargeHeapProc", String.valueOf(heapSize));
procBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
Process largeHeapProc = procBuilder.start();
try (Scanner largeHeapScanner = new Scanner(
largeHeapProc.getInputStream());) {
String pidstring = null;
while ((pidstring = largeHeapScanner.findInLine("PID\\[[0-9].*\\]")) == null) {
Thread.sleep(500);
}
int pid = Integer.parseInt(pidstring.substring(4,
pidstring.length() - 1));
System.out.println("Extracted pid: " + pid);
JDKToolLauncher jMapLauncher = JDKToolLauncher
.create("jmap", false);
jMapLauncher.addToolArg("-dump:format=b,file=" + pid + "-"
+ HEAP_DUMP_FILE_NAME);
jMapLauncher.addToolArg(String.valueOf(pid));
ProcessBuilder jMapProcessBuilder = new ProcessBuilder(
jMapLauncher.getCommand());
System.out.println("jmap command: "
+ Arrays.toString(jMapLauncher.getCommand()));
Process jMapProcess = jMapProcessBuilder.start();
OutputAnalyzer analyzer = new OutputAnalyzer(jMapProcess);
analyzer.shouldHaveExitValue(0);
analyzer.shouldContain(pid + "-" + HEAP_DUMP_FILE_NAME);
analyzer.shouldContain("Heap dump file created");
largeHeapProc.getOutputStream().write('\n');
File dumpFile = new File(pid + "-" + HEAP_DUMP_FILE_NAME);
Asserts.assertTrue(dumpFile.exists(), "Heap dump file not found.");
try (Reader reader = new BufferedReader(new FileReader(dumpFile))) {
CharBuffer buf = CharBuffer.allocate(expectedFormat.length());
reader.read(buf);
buf.clear();
Asserts.assertEQ(buf.toString(), expectedFormat,
"Wrong file format. Expected '" + expectedFormat
+ "', but found '" + buf.toString() + "'");
}
System.out.println("Success!");
} finally {
largeHeapProc.destroyForcibly();
}
}
}
......@@ -23,20 +23,17 @@
package com.oracle.java.testlibrary;
import java.util.List;
import java.util.ArrayList;
import java.util.Arrays;
import com.oracle.java.testlibrary.JDKToolFinder;
import com.oracle.java.testlibrary.ProcessTools;
import java.util.List;
/**
* A utility for constructing command lines for starting JDK tool processes.
*
* The JDKToolLauncher can in particular be combined with a
* java.lang.ProcessBuilder to easily run a JDK tool. For example, the
* following code run {@code jmap -heap} against a process with GC logging
* turned on for the {@code jmap} process:
* java.lang.ProcessBuilder to easily run a JDK tool. For example, the following
* code runs {@code jmap -heap} against a process with GC logging turned on for
* the {@code jmap} process:
*
* <pre>
* {@code
......@@ -55,19 +52,39 @@ public class JDKToolLauncher {
private final List<String> vmArgs = new ArrayList<String>();
private final List<String> toolArgs = new ArrayList<String>();
private JDKToolLauncher(String tool) {
executable = JDKToolFinder.getJDKTool(tool);
private JDKToolLauncher(String tool, boolean useCompilerJDK) {
if (useCompilerJDK) {
executable = JDKToolFinder.getJDKTool(tool);
} else {
executable = JDKToolFinder.getCurrentJDKTool(tool);
}
vmArgs.addAll(Arrays.asList(ProcessTools.getPlatformSpecificVMArgs()));
}
/**
* Creates a new JDKToolLauncher for the specified tool.
* Creates a new JDKToolLauncher for the specified tool, using the tool path
* from the compiler JDK.
*
* @param tool The name of the tool
* @param tool
* The name of the tool
* @return A new JDKToolLauncher
*/
public static JDKToolLauncher create(String tool) {
return new JDKToolLauncher(tool);
return new JDKToolLauncher(tool, true);
}
/**
* Creates a new JDKToolLauncher for the specified tool.
*
* @param tool
* The name of the tool
* @param useCompilerJDK
* If true, use the compiler JDK path; otherwise use the tested
* JDK path.
* @return A new JDKToolLauncher
*/
public static JDKToolLauncher create(String tool, boolean useCompilerJDK) {
return new JDKToolLauncher(tool, useCompilerJDK);
}
/**
......@@ -80,7 +97,8 @@ public class JDKToolLauncher {
* automatically added.
*
*
* @param arg The argument to VM running the tool
* @param arg
* The argument to VM running the tool
* @return The JDKToolLauncher instance
*/
public JDKToolLauncher addVMArg(String arg) {
......@@ -91,7 +109,8 @@ public class JDKToolLauncher {
/**
* Adds an argument to the tool.
*
* @param arg The argument to the tool
* @param arg
* The argument to the tool
* @return The JDKToolLauncher instance
*/
public JDKToolLauncher addToolArg(String arg) {
......
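With the new overload, callers can point the launcher at the JDK under test instead of the compile JDK, which is exactly what the large-heap test above does for jmap. A usage sketch (the pid value is a placeholder):

// Usage sketch: run "jmap -heap <pid>" with the tested JDK's jmap binary.
import com.oracle.java.testlibrary.JDKToolLauncher;

class JMapHeapExample {
    static Process runJmapHeap(long pid) throws java.io.IOException {
        JDKToolLauncher launcher = JDKToolLauncher.create("jmap", false);  // false => tested JDK
        launcher.addToolArg("-heap");
        launcher.addToolArg(Long.toString(pid));                           // pid of the target process
        return new ProcessBuilder(launcher.getCommand()).start();
    }
}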