Commit 1ae7a1e2 authored by amurillo

Merge

......@@ -47,14 +47,11 @@ public class ConstMethod extends Oop {
private static int HAS_LINENUMBER_TABLE;
private static int HAS_CHECKED_EXCEPTIONS;
private static int HAS_LOCALVARIABLE_TABLE;
private static int HAS_EXCEPTION_TABLE;
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("constMethodOopDesc");
constants = new OopField(type.getOopField("_constants"), 0);
// The exception handler table. 4-tuples of ints [start_pc, end_pc,
// handler_pc, catch_type index] For methods with no exceptions the
// table is pointing to Universe::the_empty_int_array
exceptionTable = new OopField(type.getOopField("_exception_table"), 0);
constMethodSize = new CIntField(type.getCIntegerField("_constMethod_size"), 0);
flags = new ByteField(type.getJByteField("_flags"), 0);
......@@ -62,6 +59,7 @@ public class ConstMethod extends Oop {
HAS_LINENUMBER_TABLE = db.lookupIntConstant("constMethodOopDesc::_has_linenumber_table").intValue();
HAS_CHECKED_EXCEPTIONS = db.lookupIntConstant("constMethodOopDesc::_has_checked_exceptions").intValue();
HAS_LOCALVARIABLE_TABLE = db.lookupIntConstant("constMethodOopDesc::_has_localvariable_table").intValue();
HAS_EXCEPTION_TABLE = db.lookupIntConstant("constMethodOopDesc::_has_exception_table").intValue();
// Size of Java bytecodes allocated immediately after constMethodOop.
codeSize = new CIntField(type.getCIntegerField("_code_size"), 0);
......@@ -78,6 +76,9 @@ public class ConstMethod extends Oop {
type = db.lookupType("LocalVariableTableElement");
localVariableTableElementSize = type.getSize();
type = db.lookupType("ExceptionTableElement");
exceptionTableElementSize = type.getSize();
}
ConstMethod(OopHandle handle, ObjectHeap heap) {
......@@ -86,7 +87,6 @@ public class ConstMethod extends Oop {
// Fields
private static OopField constants;
private static OopField exceptionTable;
private static CIntField constMethodSize;
private static ByteField flags;
private static CIntField codeSize;
......@@ -100,6 +100,7 @@ public class ConstMethod extends Oop {
private static long checkedExceptionElementSize;
private static long localVariableTableElementSize;
private static long exceptionTableElementSize;
public Method getMethod() {
InstanceKlass ik = (InstanceKlass)getConstants().getPoolHolder();
......@@ -112,10 +113,6 @@ public class ConstMethod extends Oop {
return (ConstantPool) constants.getValue(this);
}
public TypeArray getExceptionTable() {
return (TypeArray) exceptionTable.getValue(this);
}
public long getConstMethodSize() {
return constMethodSize.getValue(this);
}
......@@ -235,7 +232,6 @@ public class ConstMethod extends Oop {
super.iterateFields(visitor, doVMFields);
if (doVMFields) {
visitor.doOop(constants, true);
visitor.doOop(exceptionTable, true);
visitor.doCInt(constMethodSize, true);
visitor.doByte(flags, true);
visitor.doCInt(codeSize, true);
......@@ -326,6 +322,23 @@ public class ConstMethod extends Oop {
return ret;
}
public boolean hasExceptionTable() {
return (getFlags() & HAS_EXCEPTION_TABLE) != 0;
}
public ExceptionTableElement[] getExceptionTable() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(hasExceptionTable(), "should only be called if table is present");
}
ExceptionTableElement[] ret = new ExceptionTableElement[getExceptionTableLength()];
long offset = offsetOfExceptionTable();
for (int i = 0; i < ret.length; i++) {
ret[i] = new ExceptionTableElement(getHandle(), offset);
offset += exceptionTableElementSize;
}
return ret;
}
public boolean hasCheckedExceptions() {
return (getFlags() & HAS_CHECKED_EXCEPTIONS) != 0;
}
......@@ -415,7 +428,10 @@ public class ConstMethod extends Oop {
if (Assert.ASSERTS_ENABLED) {
Assert.that(hasLocalVariableTable(), "should only be called if table is present");
}
if (hasCheckedExceptions()) {
if (hasExceptionTable()) {
return offsetOfExceptionTable() - 2;
} else if (hasCheckedExceptions()) {
return offsetOfCheckedExceptions() - 2;
} else {
return offsetOfLastU2Element();
......@@ -432,4 +448,33 @@ public class ConstMethod extends Oop {
return offset;
}
private int getExceptionTableLength() {
if (hasExceptionTable()) {
return (int) getHandle().getCIntegerAt(offsetOfExceptionTableLength(), 2, true);
} else {
return 0;
}
}
private long offsetOfExceptionTableLength() {
if (Assert.ASSERTS_ENABLED) {
Assert.that(hasExceptionTable(), "should only be called if table is present");
}
if (hasCheckedExceptions()) {
return offsetOfCheckedExceptions() - 2;
} else {
return offsetOfLastU2Element();
}
}
private long offsetOfExceptionTable() {
long offset = offsetOfExceptionTableLength();
long length = getExceptionTableLength();
if (Assert.ASSERTS_ENABLED) {
Assert.that(length > 0, "should only be called if table is present");
}
offset -= length * exceptionTableElementSize;
return offset;
}
}
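The rewritten ConstMethod above no longer keeps the exception table in a separate typeArrayOop; the entries now live inline at the tail of the constMethodOop and are located by walking backwards from the end, with the u2 length word stored immediately after the table itself (offsetOfExceptionTable() subtracts length * exceptionTableElementSize from the length word's offset). Below is a minimal, self-contained sketch of that backward offset arithmetic. The 8-byte element size (four u2 fields) mirrors ExceptionTableElement, but the length-word offset and entry count are illustration-only assumptions, not values read from a live VM.

// Hedged sketch of the inline tail layout used by offsetOfExceptionTable() above.
public class InlineExceptionTableLayoutSketch {
    // start_pc, end_pc, handler_pc, catch_type_index: four u2 fields per entry.
    static final long EXCEPTION_TABLE_ELEMENT_SIZE = 8;

    // Mirrors offsetOfExceptionTable(): the u2 length word follows the table,
    // so the table starts length * elementSize bytes before it.
    static long offsetOfExceptionTable(long offsetOfExceptionTableLength, int length) {
        return offsetOfExceptionTableLength - length * EXCEPTION_TABLE_ELEMENT_SIZE;
    }

    public static void main(String[] args) {
        long lengthWordOffset = 120; // assumed position of the u2 length word
        int entries = 3;             // assumed entry count
        long tableStart = offsetOfExceptionTable(lengthWordOffset, entries);
        for (int i = 0; i < entries; i++) {
            System.out.println("entry " + i + " starts at offset "
                + (tableStart + i * EXCEPTION_TABLE_ELEMENT_SIZE));
        }
        // Prints 96, 104, 112; the last entry ends exactly at the length word (120).
    }
}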
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.oops;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.interpreter.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class ExceptionTableElement {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("ExceptionTableElement");
offsetOfStartPC = type.getCIntegerField("start_pc").getOffset();
offsetOfEndPC = type.getCIntegerField("end_pc").getOffset();
offsetOfHandlerPC = type.getCIntegerField("handler_pc").getOffset();
offsetOfCatchTypeIndex = type.getCIntegerField("catch_type_index").getOffset();
}
private static long offsetOfStartPC;
private static long offsetOfEndPC;
private static long offsetOfHandlerPC;
private static long offsetOfCatchTypeIndex;
private OopHandle handle;
private long offset;
public ExceptionTableElement(OopHandle handle, long offset) {
this.handle = handle;
this.offset = offset;
}
public int getStartPC() {
return (int) handle.getCIntegerAt(offset + offsetOfStartPC, 2, true);
}
public int getEndPC() {
return (int) handle.getCIntegerAt(offset + offsetOfEndPC, 2, true);
}
public int getHandlerPC() {
return (int) handle.getCIntegerAt(offset + offsetOfHandlerPC, 2, true);
}
public int getCatchTypeIndex() {
return (int) handle.getCIntegerAt(offset + offsetOfCatchTypeIndex, 2, true);
}
}
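ExceptionTableElement is a flyweight view over the debuggee's memory: it stores only the method's OopHandle plus the byte offset of one entry, and each accessor performs a single unsigned 2-byte read at (entry offset + field offset). The following hedged, self-contained sketch imitates that read pattern; the byte buffer, the little-endian order and the 0/2/4/6 field offsets are stand-ins for the target address space and for the offsets normally obtained from the TypeDataBase in initialize().

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Illustration of the flyweight u2-read pattern above; not part of the changeset.
public class ExceptionTableEntryReadSketch {
    // Assumed field offsets within one 8-byte entry; the real values come from
    // db.lookupType("ExceptionTableElement") in the class above.
    static final int START_PC = 0, END_PC = 2, HANDLER_PC = 4, CATCH_TYPE_INDEX = 6;

    // Stand-in for OopHandle.getCIntegerAt(offset, 2, true): an unsigned 2-byte load.
    static int readU2(ByteBuffer image, long offset) {
        return image.getShort((int) offset) & 0xFFFF;
    }

    public static void main(String[] args) {
        ByteBuffer image = ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN);
        image.putShort(START_PC, (short) 0);
        image.putShort(END_PC, (short) 12);
        image.putShort(HANDLER_PC, (short) 15);
        image.putShort(CATCH_TYPE_INDEX, (short) 3);
        long entryOffset = 0; // would normally be offsetOfExceptionTable() + i * elementSize
        System.out.println("handler_pc = " + readU2(image, entryOffset + HANDLER_PC));
    }
}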
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -651,10 +651,11 @@ public class GenerateOopMap {
boolean fellThrough = false; // False to get first BB marked.
// First mark all exception handlers as start of a basic-block
TypeArray excps = method().getExceptionTable();
for(int i = 0; i < excps.getLength(); i += 4) {
int handler_pc_idx = i+2;
markBB(excps.getIntAt(handler_pc_idx), null);
if (method().hasExceptionTable()) {
ExceptionTableElement[] excps = method().getExceptionTable();
for(int i = 0; i < excps.length; i++) {
markBB(excps[i].getHandlerPC(), null);
}
}
// Then iterate through the code
......@@ -891,15 +892,16 @@ public class GenerateOopMap {
// Mark entry basic block as alive and all exception handlers
_basic_blocks[0].markAsAlive();
TypeArray excps = method().getExceptionTable();
for(int i = 0; i < excps.getLength(); i += 4) {
int handler_pc_idx = i+2;
BasicBlock bb = getBasicBlockAt(excps.getIntAt(handler_pc_idx));
if (method().hasExceptionTable()) {
ExceptionTableElement[] excps = method().getExceptionTable();
for(int i = 0; i < excps.length; i ++) {
BasicBlock bb = getBasicBlockAt(excps[i].getHandlerPC());
// If block is not already alive (due to multiple exception handlers to same bb), then
// make it alive
if (bb.isDead())
bb.markAsAlive();
}
}
BytecodeStream bcs = new BytecodeStream(_method);
......@@ -1468,12 +1470,12 @@ public class GenerateOopMap {
if (_has_exceptions) {
int bci = itr.bci();
TypeArray exct = method().getExceptionTable();
for(int i = 0; i< exct.getLength(); i+=4) {
int start_pc = exct.getIntAt(i);
int end_pc = exct.getIntAt(i+1);
int handler_pc = exct.getIntAt(i+2);
int catch_type = exct.getIntAt(i+3);
ExceptionTableElement[] exct = method().getExceptionTable();
for(int i = 0; i< exct.length; i++) {
int start_pc = exct[i].getStartPC();
int end_pc = exct[i].getEndPC();
int handler_pc = exct[i].getHandlerPC();
int catch_type = exct[i].getCatchTypeIndex();
if (start_pc <= bci && bci < end_pc) {
BasicBlock excBB = getBasicBlockAt(handler_pc);
......@@ -2151,7 +2153,7 @@ public class GenerateOopMap {
_conflict = false;
_max_locals = (int) method().getMaxLocals();
_max_stack = (int) method().getMaxStack();
_has_exceptions = (method().getExceptionTable().getLength() > 0);
_has_exceptions = (method().hasExceptionTable());
_nof_refval_conflicts = 0;
_init_vars = new ArrayList(5); // There are seldom more than 5 init_vars
_report_result = false;
......
......@@ -127,7 +127,6 @@ public class Method extends Oop {
return getConstMethod().getConstants();
}
public MethodData getMethodData() { return (MethodData) methodData.getValue(this); }
public TypeArray getExceptionTable() { return getConstMethod().getExceptionTable(); }
/** WARNING: this is in words, not useful in this system; use getObjectSize() instead */
public long getMethodSize() { return methodSize.getValue(this); }
public long getMaxStack() { return maxStack.getValue(this); }
......@@ -328,6 +327,14 @@ public class Method extends Oop {
return null;
}
public boolean hasExceptionTable() {
return getConstMethod().hasExceptionTable();
}
public ExceptionTableElement[] getExceptionTable() {
return getConstMethod().getExceptionTable();
}
public boolean hasCheckedExceptions() {
return getConstMethod().hasCheckedExceptions();
}
......
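With the delegating hasExceptionTable() and getExceptionTable() added to Method above, SA-based tools can walk a method's handlers without touching the old typeArrayOop representation. A hedged usage sketch follows; it assumes an SA session is already attached and a sun.jvm.hotspot.oops.Method has been resolved, and the helper class and its dump method are illustrative names, not part of this change.

import sun.jvm.hotspot.oops.ExceptionTableElement;
import sun.jvm.hotspot.oops.Method;

// Illustrative helper: prints one line per exception handler of a resolved Method.
public class ExceptionTableDumper {
    public static void dump(Method m) {
        if (!m.hasExceptionTable()) {    // guard introduced by this change
            System.out.println("(no exception table)");
            return;
        }
        for (ExceptionTableElement e : m.getExceptionTable()) {
            System.out.println("[" + e.getStartPC() + ", " + e.getEndPC() + ") -> "
                + e.getHandlerPC() + ", catch_type #" + e.getCatchTypeIndex());
        }
    }
}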
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -504,11 +504,14 @@ public class ClassWriter implements /* imports */ ClassConstants
2 /* exp. table len. */ +
2 /* code attr. count */;
TypeArray exceptionTable = m.getExceptionTable();
final int exceptionTableLen = (int) exceptionTable.getLength();
if (exceptionTableLen != 0) {
boolean hasExceptionTable = m.hasExceptionTable();
ExceptionTableElement[] exceptionTable = null;
int exceptionTableLen = 0;
if (hasExceptionTable) {
exceptionTable = m.getExceptionTable();
exceptionTableLen = exceptionTable.length;
if (DEBUG) debugMessage("\tmethod has exception table");
codeSize += (exceptionTableLen / 4) /* exception table is 4-tuple array */
codeSize += exceptionTableLen /* exception table is 4-tuple array */
* (2 /* start_pc */ +
2 /* end_pc */ +
2 /* handler_pc */ +
......@@ -586,15 +589,15 @@ public class ClassWriter implements /* imports */ ClassConstants
dos.write(code);
// write exception table size
dos.writeShort((short) (exceptionTableLen / 4));
if (DEBUG) debugMessage("\texception table length = " + (exceptionTableLen / 4));
dos.writeShort((short) exceptionTableLen);
if (DEBUG) debugMessage("\texception table length = " + exceptionTableLen);
if (exceptionTableLen != 0) {
for (int e = 0; e < exceptionTableLen; e += 4) {
dos.writeShort((short) exceptionTable.getIntAt(e));
dos.writeShort((short) exceptionTable.getIntAt(e + 1));
dos.writeShort((short) exceptionTable.getIntAt(e + 2));
dos.writeShort((short) exceptionTable.getIntAt(e + 3));
for (int e = 0; e < exceptionTableLen; e++) {
dos.writeShort((short) exceptionTable[e].getStartPC());
dos.writeShort((short) exceptionTable[e].getEndPC());
dos.writeShort((short) exceptionTable[e].getHandlerPC());
dos.writeShort((short) exceptionTable[e].getCatchTypeIndex());
}
}
......
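The ClassWriter hunk above emits the Code attribute's exception_table exactly as the class file format defines it: a u2 entry count followed by four u2 values per entry (start_pc, end_pc, handler_pc, catch_type). For reference, here is a hedged sketch of the matching reader side, using only java.io and assuming the stream is already positioned at exception_table_length; the class name is illustrative.

import java.io.DataInputStream;
import java.io.IOException;

// Illustrative reader for the bytes the ClassWriter change above emits.
public class ExceptionTableAttributeReader {
    public static void read(DataInputStream dis) throws IOException {
        int exceptionTableLength = dis.readUnsignedShort();
        for (int i = 0; i < exceptionTableLength; i++) {
            int startPc   = dis.readUnsignedShort();
            int endPc     = dis.readUnsignedShort();
            int handlerPc = dis.readUnsignedShort();
            int catchType = dis.readUnsignedShort(); // 0 means "any type" (e.g. finally)
            System.out.println("[" + startPc + ", " + endPc + ") -> " + handlerPc
                + (catchType == 0 ? " (any)" : ", catch_type #" + catchType));
        }
    }
}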
/*
* Copyright (c) 2002, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -783,9 +783,10 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
});
// display exception table for this method
TypeArray exceptionTable = method.getExceptionTable();
// exception table is 4 tuple array of shorts
int numEntries = (int)exceptionTable.getLength() / 4;
boolean hasException = method.hasExceptionTable();
if (hasException) {
ExceptionTableElement[] exceptionTable = method.getExceptionTable();
int numEntries = exceptionTable.length;
if (numEntries != 0) {
buf.h4("Exception Table");
buf.beginTable(1);
......@@ -796,12 +797,12 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.headerCell("catch type");
buf.endTag("tr");
for (int e = 0; e < numEntries; e += 4) {
for (int e = 0; e < numEntries; e ++) {
buf.beginTag("tr");
buf.cell(Integer.toString(exceptionTable.getIntAt(e)));
buf.cell(Integer.toString(exceptionTable.getIntAt(e + 1)));
buf.cell(Integer.toString(exceptionTable.getIntAt(e + 2)));
short cpIndex = (short) exceptionTable.getIntAt(e + 3);
buf.cell(Integer.toString(exceptionTable[e].getStartPC()));
buf.cell(Integer.toString(exceptionTable[e].getEndPC()));
buf.cell(Integer.toString(exceptionTable[e].getHandlerPC()));
short cpIndex = (short) exceptionTable[e].getCatchTypeIndex();
ConstantPool.CPSlot obj = cpIndex == 0? null : cpool.getSlotAt(cpIndex);
if (obj == null) {
buf.cell("Any");
......@@ -815,6 +816,7 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.endTable();
}
}
// display constant pool hyperlink
buf.h3("Constant Pool");
......
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2012
HS_MAJOR_VER=24
HS_MINOR_VER=0
HS_BUILD_NUMBER=16
HS_BUILD_NUMBER=17
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -2573,6 +2573,13 @@ void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
emit_byte(0xC0 | encode);
}
void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66);
emit_byte(0x6C);
emit_byte(0xC0 | encode);
}
void Assembler::push(int32_t imm32) {
// in 64bits we push 64bits onto the stack but only
// take a 32bit immediate
......@@ -3178,6 +3185,13 @@ void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool v
emit_byte(0xC0 | encode);
}
void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
assert(VM_Version::supports_avx2() || (!vector256) && VM_Version::supports_avx(), "");
int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256);
emit_byte(0xEF);
emit_byte(0xC0 | encode);
}
void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx(), "");
bool vector256 = true;
......@@ -3189,6 +3203,17 @@ void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src)
emit_byte(0x01);
}
void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
assert(VM_Version::supports_avx2(), "");
bool vector256 = true;
int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
emit_byte(0x38);
emit_byte(0xC0 | encode);
// 0x00 - insert into lower 128 bits
// 0x01 - insert into upper 128 bits
emit_byte(0x01);
}
void Assembler::vzeroupper() {
assert(VM_Version::supports_avx(), "");
(void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
......@@ -7480,6 +7505,24 @@ void MacroAssembler::movbyte(ArrayAddress dst, int src) {
movb(as_Address(dst), src);
}
void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
movdl(dst, as_Address(src));
} else {
lea(rscratch1, src);
movdl(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
movq(dst, as_Address(src));
} else {
lea(rscratch1, src);
movq(dst, Address(rscratch1, 0));
}
}
void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
if (reachable(src)) {
if (UseXmmLoadAndClearUpper) {
......
......@@ -1466,6 +1466,9 @@ private:
void punpckldq(XMMRegister dst, XMMRegister src);
void punpckldq(XMMRegister dst, Address src);
// Interleave Low Quadwords
void punpcklqdq(XMMRegister dst, XMMRegister src);
#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
#endif
......@@ -1606,13 +1609,11 @@ private:
void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
// AVX 3-operands instructions (encoded with VEX prefix)
// AVX 3-operands scalar instructions (encoded with VEX prefix)
void vaddsd(XMMRegister dst, XMMRegister nds, Address src);
void vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vaddss(XMMRegister dst, XMMRegister nds, Address src);
void vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, Address src);
void vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vdivss(XMMRegister dst, XMMRegister nds, Address src);
......@@ -1625,13 +1626,17 @@ private:
void vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vsubss(XMMRegister dst, XMMRegister nds, Address src);
void vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
// AVX Vector instructions.
void vandpd(XMMRegister dst, XMMRegister nds, Address src);
void vandps(XMMRegister dst, XMMRegister nds, Address src);
void vxorpd(XMMRegister dst, XMMRegister nds, Address src);
void vxorps(XMMRegister dst, XMMRegister nds, Address src);
void vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256);
void vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src);
// AVX instruction which is used to clear upper 128 bits of YMM registers and
// to avoid transaction penalty between AVX and SSE states. There is no
......@@ -2563,6 +2568,20 @@ public:
void vxorps(XMMRegister dst, XMMRegister nds, Address src) { Assembler::vxorps(dst, nds, src); }
void vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src);
void vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
if (UseAVX > 1 || !vector256) // vpxor 256 bit is available only in AVX2
Assembler::vpxor(dst, nds, src, vector256);
else
Assembler::vxorpd(dst, nds, src, vector256);
}
// Move packed integer values from low 128 bits to high 128 bits in 256 bit vector.
void vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
if (UseAVX > 1) // vinserti128h is available only in AVX2
Assembler::vinserti128h(dst, nds, src);
else
Assembler::vinsertf128h(dst, nds, src);
}
// Data
......@@ -2615,6 +2634,13 @@ public:
// to avoid hiding movb
void movbyte(ArrayAddress dst, int src);
// Import other mov() methods from the parent class or else
// they will be hidden by the following overriding declaration.
using Assembler::movdl;
using Assembler::movq;
void movdl(XMMRegister dst, AddressLiteral src);
void movq(XMMRegister dst, AddressLiteral src);
// Can push value or effective address
void pushptr(AddressLiteral src);
......
......@@ -562,7 +562,7 @@ void VM_Version::get_processor_features() {
AllocatePrefetchInstr = 3;
}
// On family 15h processors use XMM and UnalignedLoadStores for Array Copy
if( FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
if( supports_sse2() && FLAG_IS_DEFAULT(UseXMMForArrayCopy) ) {
UseXMMForArrayCopy = true;
}
if( FLAG_IS_DEFAULT(UseUnalignedLoadStores) && UseXMMForArrayCopy ) {
......
This diff has been collapsed.
......@@ -516,7 +516,7 @@ name_for_methodOop(jvm_agent_t* J, uint64_t methodOopPtr, char * result, size_t
err = read_pointer(J, methodOopPtr + OFFSET_methodOopDesc_constMethod, &constMethod);
CHECK_FAIL(err);
err = read_pointer(J->P, constMethod + OFFSET_constMethodOopDesc_constants, &constantPool);
err = read_pointer(J, constMethod + OFFSET_constMethodOopDesc_constants, &constantPool);
CHECK_FAIL(err);
/* To get name string */
......
......@@ -3505,8 +3505,10 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, BlockBeg
}
// now perform tests that are based on flag settings
if (callee->should_inline()) {
if (callee->force_inline() || callee->should_inline()) {
// ignore heuristic controls on inlining
if (callee->force_inline())
CompileTask::print_inlining(callee, scope()->level(), bci(), "force inline by annotation");
} else {
if (inline_level() > MaxInlineLevel ) INLINE_BAILOUT("too-deep inlining");
if (recursive_inline_level(callee) > MaxRecursiveInlineLevel) INLINE_BAILOUT("too-deep recursive inlining");
......
/*
* Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -79,7 +79,7 @@ ciMethod::ciMethod(methodHandle h_m) : ciObject(h_m) {
_max_locals = h_m()->max_locals();
_code_size = h_m()->code_size();
_intrinsic_id = h_m()->intrinsic_id();
_handler_count = h_m()->exception_table()->length() / 4;
_handler_count = h_m()->exception_table_length();
_uses_monitors = h_m()->access_flags().has_monitor_bytecodes();
_balanced_monitors = !_uses_monitors || h_m()->access_flags().is_monitor_matching();
_is_c1_compilable = !h_m()->is_not_c1_compilable();
......@@ -198,7 +198,7 @@ void ciMethod::load_code() {
}
// And load the exception table.
typeArrayOop exc_table = me->exception_table();
ExceptionTable exc_table(me);
// Allocate one extra spot in our list of exceptions. This
// last entry will be used to represent the possibility that
......@@ -209,13 +209,12 @@ void ciMethod::load_code() {
* (_handler_count + 1));
if (_handler_count > 0) {
for (int i=0; i<_handler_count; i++) {
int base = i*4;
_exception_handlers[i] = new (arena) ciExceptionHandler(
holder(),
/* start */ exc_table->int_at(base),
/* limit */ exc_table->int_at(base+1),
/* goto pc */ exc_table->int_at(base+2),
/* cp index */ exc_table->int_at(base+3));
/* start */ exc_table.start_pc(i),
/* limit */ exc_table.end_pc(i),
/* goto pc */ exc_table.handler_pc(i),
/* cp index */ exc_table.catch_type_index(i));
}
}
......
......@@ -160,6 +160,8 @@ class ciMethod : public ciObject {
// Code size for inlining decisions.
int code_size_for_inlining();
bool force_inline() { return get_methodOop()->force_inline(); }
int comp_level();
int highest_osr_comp_level();
......
......@@ -31,8 +31,8 @@
#include "oops/typeArrayOop.hpp"
#include "runtime/handles.inline.hpp"
#include "utilities/accessFlags.hpp"
#include "classfile/symbolTable.hpp"
class TempNewSymbol;
class FieldAllocationCount;
......@@ -50,11 +50,80 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
KlassHandle _host_klass;
GrowableArray<Handle>* _cp_patches; // overrides for CP entries
// precomputed flags
bool _has_finalizer;
bool _has_empty_finalizer;
bool _has_vanilla_constructor;
int _max_bootstrap_specifier_index; // detects BSS values
// class attributes parsed before the instance klass is created:
bool _synthetic_flag;
Symbol* _sourcefile;
Symbol* _generic_signature;
char* _sde_buffer;
int _sde_length;
typeArrayHandle _inner_classes;
typeArrayHandle _annotations;
void set_class_synthetic_flag(bool x) { _synthetic_flag = x; }
void set_class_sourcefile(Symbol* x) { _sourcefile = x; }
void set_class_generic_signature(Symbol* x) { _generic_signature = x; }
void set_class_sde_buffer(char* x, int len) { _sde_buffer = x; _sde_length = len; }
void set_class_inner_classes(typeArrayHandle x) { _inner_classes = x; }
void set_class_annotations(typeArrayHandle x) { _annotations = x; }
void init_parsed_class_attributes() {
_synthetic_flag = false;
_sourcefile = NULL;
_generic_signature = NULL;
_sde_buffer = NULL;
_sde_length = 0;
// initialize the other flags too:
_has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
_max_bootstrap_specifier_index = -1;
}
void apply_parsed_class_attributes(instanceKlassHandle k); // update k
int _max_bootstrap_specifier_index;
class AnnotationCollector {
public:
enum Location { _in_field, _in_method, _in_class };
enum ID {
_unknown = 0,
_method_ForceInline,
_annotation_LIMIT
};
const Location _location;
int _annotations_present;
AnnotationCollector(Location location)
: _location(location), _annotations_present(0)
{
assert((int)_annotation_LIMIT <= (int)sizeof(_annotations_present) * BitsPerByte, "");
}
// If this annotation name has an ID, report it (or _none).
ID annotation_index(Symbol* name);
// Set the annotation name:
void set_annotation(ID id) {
assert((int)id >= 0 && (int)id < (int)_annotation_LIMIT, "oob");
_annotations_present |= nth_bit((int)id);
}
// Report if the annotation is present.
bool has_any_annotations() { return _annotations_present != 0; }
bool has_annotation(ID id) { return (nth_bit((int)id) & _annotations_present) != 0; }
};
class FieldAnnotationCollector: public AnnotationCollector {
public:
FieldAnnotationCollector() : AnnotationCollector(_in_field) { }
void apply_to(FieldInfo* f);
};
class MethodAnnotationCollector: public AnnotationCollector {
public:
MethodAnnotationCollector() : AnnotationCollector(_in_method) { }
void apply_to(methodHandle m);
};
class ClassAnnotationCollector: public AnnotationCollector {
public:
ClassAnnotationCollector() : AnnotationCollector(_in_class) { }
void apply_to(instanceKlassHandle k);
};
enum { fixed_buffer_size = 128 };
u_char linenumbertable_buffer[fixed_buffer_size];
......@@ -87,7 +156,9 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
u2* constantvalue_index_addr,
bool* is_synthetic_addr,
u2* generic_signature_index_addr,
typeArrayHandle* field_annotations, TRAPS);
typeArrayHandle* field_annotations,
FieldAnnotationCollector* parsed_annotations,
TRAPS);
typeArrayHandle parse_fields(Symbol* class_name,
constantPoolHandle cp, bool is_interface,
FieldAllocationCount *fac,
......@@ -113,7 +184,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
objArrayHandle methods_parameter_annotations,
objArrayHandle methods_default_annotations,
TRAPS);
typeArrayHandle parse_exception_table(u4 code_length, u4 exception_table_length,
u2* parse_exception_table(u4 code_length, u4 exception_table_length,
constantPoolHandle cp, TRAPS);
void parse_linenumber_table(
u4 code_attribute_length, u4 code_length,
......@@ -128,25 +199,32 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
typeArrayOop parse_stackmap_table(u4 code_attribute_length, TRAPS);
// Classfile attribute parsing
void parse_classfile_sourcefile_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp,
instanceKlassHandle k, int length, TRAPS);
void parse_classfile_sourcefile_attribute(constantPoolHandle cp, TRAPS);
void parse_classfile_source_debug_extension_attribute(constantPoolHandle cp, int length, TRAPS);
u2 parse_classfile_inner_classes_attribute(u1* inner_classes_attribute_start,
bool parsed_enclosingmethod_attribute,
u2 enclosing_method_class_index,
u2 enclosing_method_method_index,
constantPoolHandle cp,
instanceKlassHandle k, TRAPS);
void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
void parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, instanceKlassHandle k, u4 attribute_length, TRAPS);
TRAPS);
void parse_classfile_attributes(constantPoolHandle cp,
ClassAnnotationCollector* parsed_annotations,
TRAPS);
void parse_classfile_synthetic_attribute(constantPoolHandle cp, TRAPS);
void parse_classfile_signature_attribute(constantPoolHandle cp, TRAPS);
void parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, u4 attribute_length, TRAPS);
// Annotations handling
typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
int runtime_visible_annotations_length,
u1* runtime_invisible_annotations,
int runtime_invisible_annotations_length, TRAPS);
int skip_annotation(u1* buffer, int limit, int index);
int skip_annotation_value(u1* buffer, int limit, int index);
void parse_annotations(u1* buffer, int limit, constantPoolHandle cp,
/* Results (currently, only one result is supported): */
AnnotationCollector* result,
TRAPS);
// Final setup
unsigned int compute_oop_map_count(instanceKlassHandle super,
......
......@@ -2738,17 +2738,6 @@ void java_lang_invoke_CallSite::compute_offsets() {
if (k != NULL) {
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_lang_invoke_MethodHandle_signature());
}
// Disallow compilation of CallSite.setTargetNormal and CallSite.setTargetVolatile
// (For C2: keep this until we have throttling logic for uncommon traps.)
if (k != NULL) {
instanceKlass* ik = instanceKlass::cast(k);
methodOop m_normal = ik->lookup_method(vmSymbols::setTargetNormal_name(), vmSymbols::setTarget_signature());
methodOop m_volatile = ik->lookup_method(vmSymbols::setTargetVolatile_name(), vmSymbols::setTarget_signature());
guarantee(m_normal != NULL && m_volatile != NULL, "must exist");
m_normal->set_not_compilable_quietly();
m_volatile->set_not_compilable_quietly();
}
}
......
......@@ -2771,7 +2771,6 @@ class ClassStatistics: AllStatic {
nmethods++;
method_size += m->size();
// class loader uses same objArray for empty vectors, so don't count these
if (m->exception_table()->length() != 0) method_size += m->exception_table()->size();
if (m->has_stackmap_table()) {
method_size += m->stackmap_data()->size();
}
......
/*
* Copyright (c) 1998, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -1368,14 +1368,16 @@ char* ClassVerifier::generate_code_data(methodHandle m, u4 code_length, TRAPS) {
}
void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_data, int& min, int& max, TRAPS) {
typeArrayHandle exhandlers (THREAD, _method->exception_table());
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
constantPoolHandle cp (THREAD, _method->constants());
if (exhandlers() != NULL) {
for(int i = 0; i < exhandlers->length();) {
u2 start_pc = exhandlers->int_at(i++);
u2 end_pc = exhandlers->int_at(i++);
u2 handler_pc = exhandlers->int_at(i++);
for(int i = 0; i < exlength; i++) {
//reacquire the table in case a GC happened
ExceptionTable exhandlers(_method());
u2 start_pc = exhandlers.start_pc(i);
u2 end_pc = exhandlers.end_pc(i);
u2 handler_pc = exhandlers.handler_pc(i);
if (start_pc >= code_length || code_data[start_pc] == 0) {
class_format_error("Illegal exception table start_pc %d", start_pc);
return;
......@@ -1390,7 +1392,7 @@ void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_da
class_format_error("Illegal exception table handler_pc %d", handler_pc);
return;
}
int catch_type_index = exhandlers->int_at(i++);
int catch_type_index = exhandlers.catch_type_index(i);
if (catch_type_index != 0) {
VerificationType catch_type = cp_index_to_type(
catch_type_index, cp, CHECK_VERIFY(this));
......@@ -1409,7 +1411,6 @@ void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_da
if (start_pc < min) min = start_pc;
if (end_pc > max) max = end_pc;
}
}
}
void ClassVerifier::verify_local_variable_table(u4 code_length, char* code_data, TRAPS) {
......@@ -1474,13 +1475,15 @@ u2 ClassVerifier::verify_stackmap_table(u2 stackmap_index, u2 bci,
void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, StackMapFrame* current_frame,
StackMapTable* stackmap_table, TRAPS) {
constantPoolHandle cp (THREAD, _method->constants());
typeArrayHandle exhandlers (THREAD, _method->exception_table());
if (exhandlers() != NULL) {
for(int i = 0; i < exhandlers->length();) {
u2 start_pc = exhandlers->int_at(i++);
u2 end_pc = exhandlers->int_at(i++);
u2 handler_pc = exhandlers->int_at(i++);
int catch_type_index = exhandlers->int_at(i++);
ExceptionTable exhandlers(_method());
int exlength = exhandlers.length();
for(int i = 0; i < exlength; i++) {
//reacquire the table in case a GC happened
ExceptionTable exhandlers(_method());
u2 start_pc = exhandlers.start_pc(i);
u2 end_pc = exhandlers.end_pc(i);
u2 handler_pc = exhandlers.handler_pc(i);
int catch_type_index = exhandlers.catch_type_index(i);
if(bci >= start_pc && bci < end_pc) {
u1 flags = current_frame->flags();
if (this_uninit) { flags |= FLAG_THIS_UNINIT; }
......@@ -1505,7 +1508,6 @@ void ClassVerifier::verify_exception_handler_targets(u2 bci, bool this_uninit, S
}
}
}
}
}
void ClassVerifier::verify_cp_index(constantPoolHandle cp, int index, TRAPS) {
......
......@@ -257,6 +257,7 @@
template(java_lang_invoke_BoundMethodHandle, "java/lang/invoke/BoundMethodHandle") \
template(java_lang_invoke_DirectMethodHandle, "java/lang/invoke/DirectMethodHandle") \
template(java_lang_invoke_CountingMethodHandle, "java/lang/invoke/CountingMethodHandle") \
template(java_lang_invoke_ForceInline_signature, "Ljava/lang/invoke/ForceInline;") \
/* internal up-calls made only by the JVM, via class sun.invoke.MethodHandleNatives: */ \
template(findMethodHandleType_name, "findMethodHandleType") \
template(findMethodHandleType_signature, "(Ljava/lang/Class;[Ljava/lang/Class;)Ljava/lang/invoke/MethodType;") \
......
......@@ -131,6 +131,10 @@ public:
assert((is_reg() && value() < stack0->value() - 1) || is_stack(), "must be");
return (VMReg)(intptr_t)(value() + 1);
}
VMReg next(int i) {
assert((is_reg() && value() < stack0->value() - i) || is_stack(), "must be");
return (VMReg)(intptr_t)(value() + i);
}
VMReg prev() {
assert((is_stack() && value() > stack0->value()) || (is_reg() && value() != 0), "must be");
return (VMReg)(intptr_t)(value() - 1);
......
/*
* Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -27,6 +27,7 @@
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "memory/space.inline.hpp"
......@@ -500,11 +501,11 @@ bool ConcurrentG1Refine::expand_card_count_cache(int cache_size_idx) {
}
void ConcurrentG1Refine::clear_and_record_card_counts() {
if (G1ConcRSLogCacheSize == 0) return;
if (G1ConcRSLogCacheSize == 0) {
return;
}
#ifndef PRODUCT
double start = os::elapsedTime();
#endif
if (_expand_card_counts) {
int new_idx = _cache_size_index + 1;
......@@ -523,11 +524,8 @@ void ConcurrentG1Refine::clear_and_record_card_counts() {
assert((this_epoch+1) <= max_jint, "too many periods");
// Update epoch
_n_periods++;
#ifndef PRODUCT
double elapsed = os::elapsedTime() - start;
_g1h->g1_policy()->record_cc_clear_time(elapsed * 1000.0);
#endif
double cc_clear_time_ms = (os::elapsedTime() - start) * 1000;
_g1h->g1_policy()->phase_times()->record_cc_clear_time_ms(cc_clear_time_ms);
}
void ConcurrentG1Refine::print_worker_threads_on(outputStream* st) const {
......
......@@ -3156,9 +3156,6 @@ bool ConcurrentMark::do_yield_check(uint worker_id) {
_g1h->g1_policy()->record_concurrent_pause();
}
cmThread()->yield();
if (worker_id == 0) {
_g1h->g1_policy()->record_concurrent_pause_end();
}
return true;
} else {
return false;
......
......@@ -33,6 +33,7 @@
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
......@@ -2274,7 +2275,7 @@ void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
n_completed_buffers++;
}
g1_policy()->record_update_rs_processed_buffers(worker_i,
g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i,
(double) n_completed_buffers);
dcqs.clear_n_completed_buffers();
assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
......@@ -3633,10 +3634,10 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
gclog_or_tty->date_stamp(G1Log::fine() && PrintGCDateStamps);
TraceCPUTime tcpu(G1Log::finer(), true, gclog_or_tty);
GCCauseString gc_cause_str = GCCauseString("GC pause", gc_cause())
.append(g1_policy()->gcs_are_young() ? " (young)" : " (mixed)")
.append(g1_policy()->during_initial_mark_pause() ? " (initial-mark)" : "");
TraceTime t(gc_cause_str, G1Log::fine() && !G1Log::finer(), true, gclog_or_tty);
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);
g1_policy()->phase_times()->note_gc_start(os::elapsedTime(), active_workers,
g1_policy()->gcs_are_young(), g1_policy()->during_initial_mark_pause(), gc_cause());
TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
......@@ -3699,9 +3700,15 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// before the start GC event.
_hr_printer.start_gc(false /* full */, (size_t) total_collections());
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
//
// Preserving the old comment here if that helps the investigation:
//
// The elapsed time induced by the start time below deliberately elides
// the possible verification above.
double start_time_sec = os::elapsedTime();
double sample_start_time_sec = os::elapsedTime();
size_t start_used_bytes = used();
#if YOUNG_LIST_VERBOSE
......@@ -3710,7 +3717,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE
g1_policy()->record_collection_pause_start(start_time_sec,
g1_policy()->record_collection_pause_start(sample_start_time_sec,
start_used_bytes);
double scan_wait_start = os::elapsedTime();
......@@ -3719,11 +3726,12 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// objects on them have been correctly scanned before we start
// moving them during the GC.
bool waited = _cm->root_regions()->wait_until_scan_finished();
double wait_time_ms = 0.0;
if (waited) {
double scan_wait_end = os::elapsedTime();
double wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
g1_policy()->record_root_region_scan_wait_time(wait_time_ms);
wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
}
g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
#if YOUNG_LIST_VERBOSE
gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
......@@ -3877,12 +3885,16 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
true /* verify_fingers */);
_cm->note_end_of_gc();
double end_time_sec = os::elapsedTime();
double pause_time_ms = (end_time_sec - start_time_sec) * MILLIUNITS;
g1_policy()->record_pause_time_ms(pause_time_ms);
int active_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
workers()->active_workers() : 1);
g1_policy()->record_collection_pause_end(active_workers);
// Collect thread local data to allow the ergonomics to use
// the collected information
g1_policy()->phase_times()->collapse_par_times();
// This timing is only used by the ergonomics to handle our pause target.
// It is unclear why this should not include the full pause. We will
// investigate this in CR 7178365.
double sample_end_time_sec = os::elapsedTime();
double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
g1_policy()->record_collection_pause_end(pause_time_ms);
MemoryService::track_memory_usage();
......@@ -3929,9 +3941,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
// RETIRE events are generated before the end GC event.
_hr_printer.end_gc(false /* full */, (size_t) total_collections());
// We have to do this after we decide whether to expand the heap or not.
g1_policy()->print_heap_transition();
if (mark_in_progress()) {
concurrent_mark()->update_g1_committed();
}
......@@ -3941,13 +3950,14 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
#endif
gc_epilogue(false);
g1_policy()->phase_times()->note_gc_end(os::elapsedTime());
// We have to do this after we decide whether to expand the heap or not.
g1_policy()->print_heap_transition();
}
// The closing of the inner scope, immediately above, will complete
// logging at the "fine" level. The record_collection_pause_end() call
// above will complete logging at the "finer" level.
//
// It is not yet safe, however, to tell the concurrent mark to
// It is not yet safe to tell the concurrent mark to
// start as we have some optional output below. We don't want the
// output from the concurrent mark thread interfering with this
// logging output either.
......@@ -4695,7 +4705,7 @@ public:
if (worker_id >= _n_workers) return; // no work needed this round
double start_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->record_gc_worker_start_time(worker_id, start_time_ms);
_g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
{
ResourceMark rm;
......@@ -4744,8 +4754,8 @@ public:
evac.do_void();
double elapsed_ms = (os::elapsedTime()-start)*1000.0;
double term_ms = pss.term_time()*1000.0;
_g1h->g1_policy()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
_g1h->g1_policy()->record_termination(worker_id, term_ms, pss.term_attempts());
_g1h->g1_policy()->phase_times()->record_obj_copy_time(worker_id, elapsed_ms-term_ms);
_g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
}
_g1h->g1_policy()->record_thread_age_table(pss.age_table());
_g1h->update_surviving_young_words(pss.surviving_young_words()+1);
......@@ -4763,7 +4773,7 @@ public:
}
double end_time_ms = os::elapsedTime() * 1000.0;
_g1h->g1_policy()->record_gc_worker_end_time(worker_id, end_time_ms);
_g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
}
};
......@@ -4874,15 +4884,15 @@ g1_process_strong_roots(bool collecting_perm_gen,
double ext_roots_end = os::elapsedTime();
g1_policy()->reset_obj_copy_time(worker_i);
g1_policy()->phase_times()->reset_obj_copy_time(worker_i);
double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
buf_scan_non_heap_roots.closure_app_seconds();
g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
g1_policy()->phase_times()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
double ext_root_time_ms =
((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
g1_policy()->phase_times()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
// During conc marking we have to filter the per-thread SATB buffers
// to make sure we remove any oops into the CSet (which will show up
......@@ -4893,7 +4903,7 @@ g1_process_strong_roots(bool collecting_perm_gen,
}
}
double satb_filtering_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
g1_policy()->record_satb_filtering_time(worker_i, satb_filtering_ms);
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
if (scan_rs != NULL) {
......@@ -5393,7 +5403,7 @@ void G1CollectedHeap::process_discovered_references() {
assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
double ref_proc_time = os::elapsedTime() - ref_proc_start;
g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}
// Weak Reference processing during an evacuation pause (part 2).
......@@ -5430,7 +5440,7 @@ void G1CollectedHeap::enqueue_discovered_references() {
// and could significantly increase the pause time.
double ref_enq_time = os::elapsedTime() - ref_enq_start;
g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
}
void G1CollectedHeap::evacuate_collection_set() {
......@@ -5493,11 +5503,11 @@ void G1CollectedHeap::evacuate_collection_set() {
}
double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
g1_policy()->record_par_time(par_time_ms);
g1_policy()->phase_times()->record_par_time(par_time_ms);
double code_root_fixup_time_ms =
(os::elapsedTime() - end_par_time_sec) * 1000.0;
g1_policy()->record_code_root_fixup_time(code_root_fixup_time_ms);
g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
set_par_threads(0);
......@@ -5759,7 +5769,7 @@ void G1CollectedHeap::cleanUpCardTable() {
}
double elapsed = os::elapsedTime() - start;
g1_policy()->record_clear_ct_time(elapsed * 1000.0);
g1_policy()->phase_times()->record_clear_ct_time(elapsed * 1000.0);
}
void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
......@@ -5868,8 +5878,8 @@ void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
NULL /* old_proxy_set */,
NULL /* humongous_proxy_set */,
false /* par */);
policy->record_young_free_cset_time_ms(young_time_ms);
policy->record_non_young_free_cset_time_ms(non_young_time_ms);
policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}
// This routine is similar to the above but does not record
......
......@@ -36,6 +36,7 @@
class HeapRegion;
class CollectionSetChooser;
class G1GCPhaseTimes;
// TraceGen0Time collects data on _both_ young and mixed evacuation pauses
// (the latter may contain non-young regions - i.e. regions that are
......@@ -61,26 +62,14 @@ class TraceGen0TimeData : public CHeapObj<mtGC> {
NumberSeq _parallel_other;
NumberSeq _clear_ct;
void print_summary (int level, const char* str, const NumberSeq* seq) const;
void print_summary_sd (int level, const char* str, const NumberSeq* seq) const;
void print_summary(const char* str, const NumberSeq* seq) const;
void print_summary_sd(const char* str, const NumberSeq* seq) const;
public:
TraceGen0TimeData() : _young_pause_num(0), _mixed_pause_num(0) {};
void record_start_collection(double time_to_stop_the_world_ms);
void record_yield_time(double yield_time_ms);
void record_end_collection(
double total_ms,
double other_ms,
double root_region_scan_wait_ms,
double parallel_ms,
double ext_root_scan_ms,
double satb_filtering_ms,
double update_rs_ms,
double scan_rs_ms,
double obj_copy_ms,
double termination_ms,
double parallel_other_ms,
double clear_ct_ms);
void record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times);
void increment_young_collection_count();
void increment_mixed_collection_count();
void print() const;
......@@ -186,25 +175,9 @@ private:
CollectionSetChooser* _collectionSetChooser;
double _cur_collection_start_sec;
double _full_collection_start_sec;
size_t _cur_collection_pause_used_at_start_bytes;
uint _cur_collection_pause_used_regions_at_start;
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
double _cur_ref_enq_time_ms;
#ifndef PRODUCT
// Card Table Count Cache stats
double _min_clear_cc_time_ms; // min
double _max_clear_cc_time_ms; // max
double _cur_clear_cc_time_ms; // clearing time during current pause
double _cum_clear_cc_time_ms; // cumulative clearing time
jlong _num_cc_clears; // number of times the card count cache has been cleared
#endif
// These exclude marking times.
TruncatedSeq* _recent_gc_times_ms;
......@@ -217,23 +190,6 @@ private:
double _stop_world_start;
double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms;
double* _par_last_satb_filtering_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
double* _par_last_scan_rs_times_ms;
double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms;
double* _par_last_termination_attempts;
double* _par_last_gc_worker_end_times_ms;
double* _par_last_gc_worker_times_ms;
// Each worker's 'other' time, i.e. the elapsed time of the parallel
// code executed by a worker minus the sum of the individual sub-phase
// times for that worker thread.
double* _par_last_gc_worker_other_times_ms;
// indicates whether we are in young or mixed GC mode
bool _gcs_are_young;
......@@ -306,10 +262,6 @@ private:
size_t _recorded_rs_lengths;
size_t _max_rs_lengths;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
double _sigma;
size_t _rs_lengths_prediction;
......@@ -341,8 +293,7 @@ private:
void set_no_of_gc_threads(uintx v) { _no_of_gc_threads = v; }
double _pause_time_target_ms;
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
size_t _pending_cards;
size_t _max_pending_cards;
......@@ -497,14 +448,6 @@ public:
uint young_cset_region_length() { return eden_cset_region_length() +
survivor_cset_region_length(); }
void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
}
void record_non_young_free_cset_time_ms(double time_ms) {
_recorded_non_young_free_cset_time_ms = time_ms;
}
double predict_survivor_regions_evac_time();
void cset_regions_freed() {
......@@ -552,19 +495,6 @@ public:
}
private:
void print_stats(int level, const char* str, double value);
void print_stats(int level, const char* str, double value, int workers);
void print_stats(int level, const char* str, int value);
void print_par_stats(int level, const char* str, double* data, bool showDecimals = true);
double avg_value (double* data);
double max_value (double* data);
double sum_of_values (double* data);
double max_sum (double* data1, double* data2);
double _last_pause_time_ms;
size_t _bytes_in_collection_set_before_gc;
size_t _bytes_copied_during_gc;
......@@ -638,6 +568,8 @@ private:
// Stash a pointer to the g1 heap.
G1CollectedHeap* _g1;
G1GCPhaseTimes* _phase_times;
// The ratio of gc time to elapsed time, computed over recent pauses.
double _recent_avg_pause_time_ratio;
......@@ -677,7 +609,6 @@ private:
double _cur_mark_stop_world_time_ms;
double _mark_remark_start_sec;
double _mark_cleanup_start_sec;
double _root_region_scan_wait_time_ms;
// Update the young list target length either by setting it to the
// desired fixed value or by calculating it using G1's pause
......@@ -728,6 +659,8 @@ public:
return CollectorPolicy::G1CollectorPolicyKind;
}
G1GCPhaseTimes* phase_times() const { return _phase_times; }
// Check the current value of the young list RSet lengths and
// compare it against the last prediction. If the current value is
// higher, recalculate the young list target length prediction.
......@@ -772,10 +705,6 @@ public:
void record_concurrent_mark_init_end(double
mark_init_elapsed_time_ms);
void record_root_region_scan_wait_time(double time_ms) {
_root_region_scan_wait_time_ms = time_ms;
}
void record_concurrent_mark_remark_start();
void record_concurrent_mark_remark_end();
......@@ -784,97 +713,14 @@ public:
void record_concurrent_mark_cleanup_completed();
void record_concurrent_pause();
void record_concurrent_pause_end();
void record_collection_pause_end(int no_of_gc_threads);
void record_collection_pause_end(double pause_time);
void print_heap_transition();
// Record the fact that a full collection occurred.
void record_full_collection_start();
void record_full_collection_end();
void record_gc_worker_start_time(int worker_i, double ms) {
_par_last_gc_worker_start_times_ms[worker_i] = ms;
}
void record_ext_root_scan_time(int worker_i, double ms) {
_par_last_ext_root_scan_times_ms[worker_i] = ms;
}
void record_satb_filtering_time(int worker_i, double ms) {
_par_last_satb_filtering_times_ms[worker_i] = ms;
}
void record_update_rs_time(int thread, double ms) {
_par_last_update_rs_times_ms[thread] = ms;
}
void record_update_rs_processed_buffers (int thread,
double processed_buffers) {
_par_last_update_rs_processed_buffers[thread] = processed_buffers;
}
void record_scan_rs_time(int thread, double ms) {
_par_last_scan_rs_times_ms[thread] = ms;
}
void reset_obj_copy_time(int thread) {
_par_last_obj_copy_times_ms[thread] = 0.0;
}
void reset_obj_copy_time() {
reset_obj_copy_time(0);
}
void record_obj_copy_time(int thread, double ms) {
_par_last_obj_copy_times_ms[thread] += ms;
}
void record_termination(int thread, double ms, size_t attempts) {
_par_last_termination_times_ms[thread] = ms;
_par_last_termination_attempts[thread] = (double) attempts;
}
void record_gc_worker_end_time(int worker_i, double ms) {
_par_last_gc_worker_end_times_ms[worker_i] = ms;
}
void record_pause_time_ms(double ms) {
_last_pause_time_ms = ms;
}
void record_clear_ct_time(double ms) {
_cur_clear_ct_time_ms = ms;
}
void record_par_time(double ms) {
_cur_collection_par_time_ms = ms;
}
void record_code_root_fixup_time(double ms) {
_cur_collection_code_root_fixup_time_ms = ms;
}
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
void record_ref_enq_time(double ms) {
_cur_ref_enq_time_ms = ms;
}
#ifndef PRODUCT
void record_cc_clear_time(double ms) {
if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
_min_clear_cc_time_ms = ms;
if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms)
_max_clear_cc_time_ms = ms;
_cur_clear_cc_time_ms = ms;
_cum_clear_cc_time_ms += ms;
_num_cc_clears++;
}
#endif
// Record how much space we copied during a GC. This is typically
// called when a GC alloc region is being retired.
void record_bytes_copied_during_gc(size_t bytes) {
......
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
// Helper class for avoiding interleaved logging
class LineBuffer: public StackObj {
private:
static const int BUFFER_LEN = 1024;
static const int INDENT_CHARS = 3;
char _buffer[BUFFER_LEN];
int _indent_level;
int _cur;
void vappend(const char* format, va_list ap) {
int res = vsnprintf(&_buffer[_cur], BUFFER_LEN - _cur, format, ap);
if (res != -1) {
_cur += res;
} else {
DEBUG_ONLY(warning("buffer too small in LineBuffer");)
_buffer[BUFFER_LEN -1] = 0;
_cur = BUFFER_LEN; // vsnprintf above should not add to _buffer if we are called again
}
}
public:
explicit LineBuffer(int indent_level): _indent_level(indent_level), _cur(0) {
for (; (_cur < BUFFER_LEN && _cur < (_indent_level * INDENT_CHARS)); _cur++) {
_buffer[_cur] = ' ';
}
}
#ifndef PRODUCT
~LineBuffer() {
assert(_cur == _indent_level * INDENT_CHARS, "pending data in buffer - append_and_print_cr() not called?");
}
#endif
void append(const char* format, ...) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
}
void append_and_print_cr(const char* format, ...) {
va_list ap;
va_start(ap, format);
vappend(format, ap);
va_end(ap);
gclog_or_tty->print_cr("%s", _buffer);
_cur = _indent_level * INDENT_CHARS;
}
};
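A minimal usage sketch of the helper above, relying only on the LineBuffer interface just defined; the function name and the values passed in are illustrative assumptions, not HotSpot call sites:
// Illustration only: build one indented log line piecewise and emit it in one call,
// so output from concurrent GC worker threads cannot interleave mid-line.
static void print_phase_line_example(const char* phase, double ms, int workers) {
  LineBuffer buf(2);                                       // indent level 2 -> 6 leading spaces
  buf.append("[%s (ms): %.1lf", phase, ms);                // accumulate into the internal buffer
  buf.append_and_print_cr(", GC Workers: %d]", workers);   // print and reset to the indent
}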
G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_max_gc_threads(max_gc_threads),
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
_cum_clear_cc_time_ms(0.0),
_num_cc_clears(0L)
{
assert(max_gc_threads > 0, "Must have some GC threads");
_par_last_gc_worker_start_times_ms = new double[_max_gc_threads];
_par_last_ext_root_scan_times_ms = new double[_max_gc_threads];
_par_last_satb_filtering_times_ms = new double[_max_gc_threads];
_par_last_update_rs_times_ms = new double[_max_gc_threads];
_par_last_update_rs_processed_buffers = new double[_max_gc_threads];
_par_last_scan_rs_times_ms = new double[_max_gc_threads];
_par_last_obj_copy_times_ms = new double[_max_gc_threads];
_par_last_termination_times_ms = new double[_max_gc_threads];
_par_last_termination_attempts = new double[_max_gc_threads];
_par_last_gc_worker_end_times_ms = new double[_max_gc_threads];
_par_last_gc_worker_times_ms = new double[_max_gc_threads];
_par_last_gc_worker_other_times_ms = new double[_max_gc_threads];
}
void G1GCPhaseTimes::note_gc_start(double pause_start_time_sec, uint active_gc_threads,
bool is_young_gc, bool is_initial_mark_gc, GCCause::Cause gc_cause) {
assert(active_gc_threads > 0, "The number of threads must be > 0");
assert(active_gc_threads <= _max_gc_threads, "The number of active threads must be <= the max number of threads");
_active_gc_threads = active_gc_threads;
_pause_start_time_sec = pause_start_time_sec;
_is_young_gc = is_young_gc;
_is_initial_mark_gc = is_initial_mark_gc;
_gc_cause = gc_cause;
#ifdef ASSERT
// initialise the timing data to something well known so that we can spot
// if something is not set properly
for (uint i = 0; i < _max_gc_threads; ++i) {
_par_last_gc_worker_start_times_ms[i] = -1234.0;
_par_last_ext_root_scan_times_ms[i] = -1234.0;
_par_last_satb_filtering_times_ms[i] = -1234.0;
_par_last_update_rs_times_ms[i] = -1234.0;
_par_last_update_rs_processed_buffers[i] = -1234.0;
_par_last_scan_rs_times_ms[i] = -1234.0;
_par_last_obj_copy_times_ms[i] = -1234.0;
_par_last_termination_times_ms[i] = -1234.0;
_par_last_termination_attempts[i] = -1234.0;
_par_last_gc_worker_end_times_ms[i] = -1234.0;
_par_last_gc_worker_times_ms[i] = -1234.0;
_par_last_gc_worker_other_times_ms[i] = -1234.0;
}
#endif
}
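For example, if a debug build never calls record_scan_rs_time() for some active worker, that slot keeps the -1234.0 sentinel and stands out directly in the per-worker "Scan RS" values printed under G1Log::finest(), which is the point of initializing the arrays to a well-known value.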
void G1GCPhaseTimes::note_gc_end(double pause_end_time_sec) {
if (G1Log::fine()) {
double pause_time_ms = (pause_end_time_sec - _pause_start_time_sec) * MILLIUNITS;
for (uint i = 0; i < _active_gc_threads; i++) {
_par_last_gc_worker_times_ms[i] = _par_last_gc_worker_end_times_ms[i] -
_par_last_gc_worker_start_times_ms[i];
double worker_known_time = _par_last_ext_root_scan_times_ms[i] +
_par_last_satb_filtering_times_ms[i] +
_par_last_update_rs_times_ms[i] +
_par_last_scan_rs_times_ms[i] +
_par_last_obj_copy_times_ms[i] +
_par_last_termination_times_ms[i];
_par_last_gc_worker_other_times_ms[i] = _par_last_gc_worker_times_ms[i] -
worker_known_time;
}
print(pause_time_ms);
}
}
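The per-worker bookkeeping above reduces to simple differences; a self-contained sketch with assumed round numbers (not HotSpot code) shows how the "GC Worker Other" value falls out:
#include <cstdio>

// Illustrative arithmetic only: a worker's "other" time is the part of its
// wall-clock interval not attributed to any known sub-phase.
int main() {
  double start_ms = 10.0, end_ms = 55.0;                  // worker start/end stamps
  double known_ms = 12.0 + 0.5 + 8.0 + 6.0 + 15.0 + 1.0;  // ext root scan, SATB filtering,
                                                          // update RS, scan RS, obj copy,
                                                          // termination
  double total_ms = end_ms - start_ms;                    // 45.0 -> "GC Worker Total"
  double other_ms = total_ms - known_ms;                  // 45.0 - 42.5 = 2.5 -> "GC Worker Other"
  std::printf("total=%.1f ms, other=%.1f ms\n", total_ms, other_ms);
  return 0;
}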
void G1GCPhaseTimes::print_par_stats(int level,
const char* str,
double* data,
bool showDecimals) {
double min = data[0], max = data[0];
double total = 0.0;
LineBuffer buf(level);
buf.append("[%s (ms):", str);
for (uint i = 0; i < _active_gc_threads; ++i) {
double val = data[i];
if (val < min)
min = val;
if (val > max)
max = val;
total += val;
if (G1Log::finest()) {
if (showDecimals) {
buf.append(" %.1lf", val);
} else {
buf.append(" %d", (int)val);
}
}
}
if (G1Log::finest()) {
buf.append_and_print_cr("");
}
double avg = total / (double) _active_gc_threads;
if (showDecimals) {
buf.append_and_print_cr(" Min: %.1lf, Avg: %.1lf, Max: %.1lf, Diff: %.1lf, Sum: %.1lf]",
min, avg, max, max - min, total);
} else {
buf.append_and_print_cr(" Min: %d, Avg: %d, Max: %d, Diff: %d, Sum: %d]",
(int)min, (int)avg, (int)max, (int)max - (int)min, (int)total);
}
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value) {
LineBuffer(level).append_and_print_cr("[%s: %.1lf ms]", str, value);
}
void G1GCPhaseTimes::print_stats(int level, const char* str, double value, int workers) {
LineBuffer(level).append_and_print_cr("[%s: %.1lf ms, GC Workers: %d]", str, value, workers);
}
void G1GCPhaseTimes::print_stats(int level, const char* str, int value) {
LineBuffer(level).append_and_print_cr("[%s: %d]", str, value);
}
double G1GCPhaseTimes::avg_value(double* data) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
double ret = 0.0;
for (uint i = 0; i < _active_gc_threads; ++i) {
ret += data[i];
}
return ret / (double) _active_gc_threads;
} else {
return data[0];
}
}
double G1GCPhaseTimes::max_value(double* data) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
double ret = data[0];
for (uint i = 1; i < _active_gc_threads; ++i) {
if (data[i] > ret) {
ret = data[i];
}
}
return ret;
} else {
return data[0];
}
}
double G1GCPhaseTimes::sum_of_values(double* data) {
if (G1CollectedHeap::use_parallel_gc_threads()) {
double sum = 0.0;
for (uint i = 0; i < _active_gc_threads; i++) {
sum += data[i];
}
return sum;
} else {
return data[0];
}
}
double G1GCPhaseTimes::max_sum(double* data1, double* data2) {
double ret = data1[0] + data2[0];
if (G1CollectedHeap::use_parallel_gc_threads()) {
for (uint i = 1; i < _active_gc_threads; ++i) {
double data = data1[i] + data2[i];
if (data > ret) {
ret = data;
}
}
}
return ret;
}
void G1GCPhaseTimes::collapse_par_times() {
_ext_root_scan_time = avg_value(_par_last_ext_root_scan_times_ms);
_satb_filtering_time = avg_value(_par_last_satb_filtering_times_ms);
_update_rs_time = avg_value(_par_last_update_rs_times_ms);
_update_rs_processed_buffers =
sum_of_values(_par_last_update_rs_processed_buffers);
_scan_rs_time = avg_value(_par_last_scan_rs_times_ms);
_obj_copy_time = avg_value(_par_last_obj_copy_times_ms);
_termination_time = avg_value(_par_last_termination_times_ms);
}
double G1GCPhaseTimes::accounted_time_ms() {
// Subtract the root region scanning wait time. It's initialized to
// zero at the start of the pause.
double misc_time_ms = _root_region_scan_wait_time_ms;
misc_time_ms += _cur_collection_par_time_ms;
// Now subtract the time taken to fix up roots in generated code
misc_time_ms += _cur_collection_code_root_fixup_time_ms;
// Subtract the time taken to clean the card table from the
// current value of "other time"
misc_time_ms += _cur_clear_ct_time_ms;
return misc_time_ms;
}
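Put differently, the "Other" line printed below is pause_time_ms - accounted_time_ms(); with assumed round numbers, 60.0 - (1.0 root region scan wait + 45.0 parallel time + 0.5 code root fixup + 3.5 clear CT) = 10.0 ms of unattributed pause time.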
void G1GCPhaseTimes::print(double pause_time_ms) {
if (PrintGCTimeStamps) {
gclog_or_tty->stamp();
gclog_or_tty->print(": ");
}
GCCauseString gc_cause_str = GCCauseString("GC pause", _gc_cause)
.append(_is_young_gc ? " (young)" : " (mixed)")
.append(_is_initial_mark_gc ? " (initial-mark)" : "");
gclog_or_tty->print_cr("[%s, %3.7f secs]", (const char*)gc_cause_str, pause_time_ms / 1000.0);
if (!G1Log::finer()) {
return;
}
if (_root_region_scan_wait_time_ms > 0.0) {
print_stats(1, "Root Region Scan Waiting", _root_region_scan_wait_time_ms);
}
if (G1CollectedHeap::use_parallel_gc_threads()) {
print_stats(1, "Parallel Time", _cur_collection_par_time_ms, _active_gc_threads);
print_par_stats(2, "GC Worker Start", _par_last_gc_worker_start_times_ms);
print_par_stats(2, "Ext Root Scanning", _par_last_ext_root_scan_times_ms);
if (_satb_filtering_time > 0.0) {
print_par_stats(2, "SATB Filtering", _par_last_satb_filtering_times_ms);
}
print_par_stats(2, "Update RS", _par_last_update_rs_times_ms);
if (G1Log::finest()) {
print_par_stats(3, "Processed Buffers", _par_last_update_rs_processed_buffers,
false /* showDecimals */);
}
print_par_stats(2, "Scan RS", _par_last_scan_rs_times_ms);
print_par_stats(2, "Object Copy", _par_last_obj_copy_times_ms);
print_par_stats(2, "Termination", _par_last_termination_times_ms);
if (G1Log::finest()) {
print_par_stats(3, "Termination Attempts", _par_last_termination_attempts,
false /* showDecimals */);
}
print_par_stats(2, "GC Worker Other", _par_last_gc_worker_other_times_ms);
print_par_stats(2, "GC Worker Total", _par_last_gc_worker_times_ms);
print_par_stats(2, "GC Worker End", _par_last_gc_worker_end_times_ms);
} else {
print_stats(1, "Ext Root Scanning", _ext_root_scan_time);
if (_satb_filtering_time > 0.0) {
print_stats(1, "SATB Filtering", _satb_filtering_time);
}
print_stats(1, "Update RS", _update_rs_time);
if (G1Log::finest()) {
print_stats(2, "Processed Buffers", (int)_update_rs_processed_buffers);
}
print_stats(1, "Scan RS", _scan_rs_time);
print_stats(1, "Object Copying", _obj_copy_time);
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Clear CT", _cur_clear_ct_time_ms);
if (Verbose && G1Log::finest()) {
print_stats(1, "Cur Clear CC", _cur_clear_cc_time_ms);
print_stats(1, "Cum Clear CC", _cum_clear_cc_time_ms);
print_stats(1, "Min Clear CC", _min_clear_cc_time_ms);
print_stats(1, "Max Clear CC", _max_clear_cc_time_ms);
if (_num_cc_clears > 0) {
print_stats(1, "Avg Clear CC", _cum_clear_cc_time_ms / ((double)_num_cc_clears));
}
}
double misc_time_ms = pause_time_ms - accounted_time_ms();
print_stats(1, "Other", misc_time_ms);
print_stats(2, "Choose CSet",
(_recorded_young_cset_choice_time_ms +
_recorded_non_young_cset_choice_time_ms));
print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
print_stats(2, "Free CSet",
(_recorded_young_free_cset_time_ms +
_recorded_non_young_free_cset_time_ms));
}
void G1GCPhaseTimes::record_cc_clear_time_ms(double ms) {
if (!(Verbose && G1Log::finest())) {
return;
}
if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms) {
_min_clear_cc_time_ms = ms;
}
if (_max_clear_cc_time_ms < 0.0 || ms >= _max_clear_cc_time_ms) {
_max_clear_cc_time_ms = ms;
}
_cur_clear_cc_time_ms = ms;
_cum_clear_cc_time_ms += ms;
_num_cc_clears++;
}
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
#include "memory/allocation.hpp"
#include "gc_interface/gcCause.hpp"
class G1GCPhaseTimes : public CHeapObj<mtGC> {
friend class G1CollectorPolicy;
friend class TraceGen0TimeData;
private:
uint _active_gc_threads;
uint _max_gc_threads;
GCCause::Cause _gc_cause;
bool _is_young_gc;
bool _is_initial_mark_gc;
double _pause_start_time_sec;
double* _par_last_gc_worker_start_times_ms;
double* _par_last_ext_root_scan_times_ms;
double* _par_last_satb_filtering_times_ms;
double* _par_last_update_rs_times_ms;
double* _par_last_update_rs_processed_buffers;
double* _par_last_scan_rs_times_ms;
double* _par_last_obj_copy_times_ms;
double* _par_last_termination_times_ms;
double* _par_last_termination_attempts;
double* _par_last_gc_worker_end_times_ms;
double* _par_last_gc_worker_times_ms;
double* _par_last_gc_worker_other_times_ms;
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_clear_ct_time_ms;
double _cur_ref_proc_time_ms;
double _cur_ref_enq_time_ms;
// Helper methods for detailed logging
void print_par_stats(int level, const char* str, double* data, bool showDecimals = true);
void print_stats(int level, const char* str, double value);
void print_stats(int level, const char* str, double value, int workers);
void print_stats(int level, const char* str, int value);
double avg_value(double* data);
double max_value(double* data);
double sum_of_values(double* data);
double max_sum(double* data1, double* data2);
double accounted_time_ms();
// Card Table Count Cache stats
double _min_clear_cc_time_ms; // min
double _max_clear_cc_time_ms; // max
double _cur_clear_cc_time_ms; // clearing time during current pause
double _cum_clear_cc_time_ms; // cumulative clearing time
jlong _num_cc_clears; // number of times the card count cache has been cleared
// The following instance variables are directly accessed by G1CollectorPolicy
// and TraceGen0TimeData. This is why those classes are declared friends.
// An alternative is to add getters and setters for all of these fields.
// It might also be possible to restructure the code to reduce these
// dependencies.
double _ext_root_scan_time;
double _satb_filtering_time;
double _update_rs_time;
double _update_rs_processed_buffers;
double _scan_rs_time;
double _obj_copy_time;
double _termination_time;
double _cur_collection_start_sec;
double _root_region_scan_wait_time_ms;
double _recorded_young_cset_choice_time_ms;
double _recorded_non_young_cset_choice_time_ms;
double _recorded_young_free_cset_time_ms;
double _recorded_non_young_free_cset_time_ms;
void print(double pause_time_ms);
public:
G1GCPhaseTimes(uint max_gc_threads);
void note_gc_start(double pause_start_time_sec, uint active_gc_threads,
bool is_young_gc, bool is_initial_mark_gc, GCCause::Cause gc_cause);
void note_gc_end(double pause_end_time_sec);
void collapse_par_times();
void record_gc_worker_start_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_gc_worker_start_times_ms[worker_i] = ms;
}
void record_ext_root_scan_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_ext_root_scan_times_ms[worker_i] = ms;
}
void record_satb_filtering_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_satb_filtering_times_ms[worker_i] = ms;
}
void record_update_rs_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_update_rs_times_ms[worker_i] = ms;
}
void record_update_rs_processed_buffers (uint worker_i,
double processed_buffers) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_update_rs_processed_buffers[worker_i] = processed_buffers;
}
void record_scan_rs_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_scan_rs_times_ms[worker_i] = ms;
}
void reset_obj_copy_time(uint worker_i) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_obj_copy_times_ms[worker_i] = 0.0;
}
void reset_obj_copy_time() {
reset_obj_copy_time(0);
}
void record_obj_copy_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_obj_copy_times_ms[worker_i] += ms;
}
void record_termination(uint worker_i, double ms, size_t attempts) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_termination_times_ms[worker_i] = ms;
_par_last_termination_attempts[worker_i] = (double) attempts;
}
void record_gc_worker_end_time(uint worker_i, double ms) {
assert(worker_i >= 0, "worker index must be >= 0");
assert(worker_i < _active_gc_threads, "worker index out of bounds");
_par_last_gc_worker_end_times_ms[worker_i] = ms;
}
void record_clear_ct_time(double ms) {
_cur_clear_ct_time_ms = ms;
}
void record_par_time(double ms) {
_cur_collection_par_time_ms = ms;
}
void record_code_root_fixup_time(double ms) {
_cur_collection_code_root_fixup_time_ms = ms;
}
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
void record_ref_enq_time(double ms) {
_cur_ref_enq_time_ms = ms;
}
void record_root_region_scan_wait_time(double time_ms) {
_root_region_scan_wait_time_ms = time_ms;
}
void record_cc_clear_time_ms(double ms);
void record_young_free_cset_time_ms(double time_ms) {
_recorded_young_free_cset_time_ms = time_ms;
}
void record_non_young_free_cset_time_ms(double time_ms) {
_recorded_non_young_free_cset_time_ms = time_ms;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1GCPHASETIMESLOG_HPP
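A hedged end-to-end sketch of how a caller might drive this class across one pause. The wrapper function, worker count and timing values are illustrative assumptions; only methods declared above are used, and GCCause::_g1_inc_collection_pause is just one example cause:
// Illustration only: record a single young pause with four workers.
static void example_pause(G1GCPhaseTimes* phase_times, double pause_start_sec) {
  uint active_workers = 4;
  phase_times->note_gc_start(pause_start_sec, active_workers,
                             true  /* is_young_gc */,
                             false /* is_initial_mark_gc */,
                             GCCause::_g1_inc_collection_pause);
  phase_times->record_root_region_scan_wait_time(0.0);
  for (uint i = 0; i < active_workers; i++) {
    phase_times->record_gc_worker_start_time(i, 0.0);   // all times are in ms
    phase_times->record_ext_root_scan_time(i, 3.2);
    phase_times->record_satb_filtering_time(i, 0.0);
    phase_times->record_update_rs_time(i, 5.1);
    phase_times->record_update_rs_processed_buffers(i, 16.0);
    phase_times->record_scan_rs_time(i, 4.4);
    phase_times->reset_obj_copy_time(i);
    phase_times->record_obj_copy_time(i, 12.7);
    phase_times->record_termination(i, 0.1, 1 /* attempts */);
    phase_times->record_gc_worker_end_time(i, 26.0);
  }
  phase_times->record_par_time(26.0);
  phase_times->record_code_root_fixup_time(0.3);
  phase_times->record_clear_ct_time(1.5);
  phase_times->record_ref_proc_time(0.2);
  phase_times->record_ref_enq_time(0.1);
  phase_times->record_young_free_cset_time_ms(0.4);
  phase_times->record_non_young_free_cset_time_ms(0.0);
  phase_times->collapse_par_times();
  phase_times->note_gc_end(pause_start_sec + 0.030);    // prints the breakdown if G1Log::fine()
}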
......@@ -29,6 +29,7 @@
#include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
......@@ -224,7 +225,7 @@ void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
assert( _cards_scanned != NULL, "invariant" );
_cards_scanned[worker_i] = scanRScl.cards_done();
_g1p->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
_g1p->phase_times()->record_scan_rs_time(worker_i, scan_rs_time_sec * 1000.0);
}
// Closure used for updating RSets and recording references that
......@@ -276,7 +277,7 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
guarantee(cl.n() == 0, "Card table should be clean.");
}
_g1p->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
_g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
}
class CountRSSizeClosure: public HeapRegionClosure {
......@@ -390,13 +391,13 @@ void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
if (G1UseParallelRSetUpdating || (worker_i == 0)) {
updateRS(&into_cset_dcq, worker_i);
} else {
_g1p->record_update_rs_processed_buffers(worker_i, 0.0);
_g1p->record_update_rs_time(worker_i, 0.0);
_g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0.0);
_g1p->phase_times()->record_update_rs_time(worker_i, 0.0);
}
if (G1UseParallelRSetScanning || (worker_i == 0)) {
scanRS(oc, worker_i);
} else {
_g1p->record_scan_rs_time(worker_i, 0.0);
_g1p->phase_times()->record_scan_rs_time(worker_i, 0.0);
}
// We now clear the cached values of _cset_rs_update_cl for this worker
......
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -375,7 +375,6 @@ IRT_ENTRY(address, InterpreterRuntime::exception_handler_for_exception(JavaThrea
Handle h_exception(thread, exception);
methodHandle h_method (thread, method(thread));
constantPoolHandle h_constants(thread, h_method->constants());
typeArrayHandle h_extable (thread, h_method->exception_table());
bool should_repeat;
int handler_bci;
int current_bci = bci(thread);
......@@ -547,23 +546,6 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_get_put(JavaThread* thread, Bytecode
}
}
if (is_put && !is_static && klass->is_subclass_of(SystemDictionary::CallSite_klass()) && (info.name() == vmSymbols::target_name())) {
const jint direction = frame::interpreter_frame_expression_stack_direction();
Handle call_site (THREAD, *((oop*) thread->last_frame().interpreter_frame_tos_at(-1 * direction)));
Handle method_handle(THREAD, *((oop*) thread->last_frame().interpreter_frame_tos_at( 0 * direction)));
assert(call_site ->is_a(SystemDictionary::CallSite_klass()), "must be");
assert(method_handle->is_a(SystemDictionary::MethodHandle_klass()), "must be");
{
// Walk all nmethods depending on this call site.
MutexLocker mu(Compile_lock, thread);
Universe::flush_dependents_on(call_site, method_handle);
}
// Don't allow fast path for setting CallSite.target and sub-classes.
put_code = (Bytecodes::Code) 0;
}
cache_entry(thread)->set_field(
get_code,
put_code,
......
......@@ -231,8 +231,6 @@ public:
if (obj->is_constMethod()) {
mark_object(obj);
mark_object(constMethodOop(obj)->stackmap_data());
// Exception tables are needed by ci code during compilation.
mark_object(constMethodOop(obj)->exception_table());
}
// Mark objects referenced by klass objects which are read-only.
......@@ -513,7 +511,6 @@ public:
for(i = 0; i < methods->length(); i++) {
methodOop m = methodOop(methods->obj_at(i));
mark_and_move_for_policy(OP_favor_startup, m->constMethod(), _move_ro);
mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->exception_table(), _move_ro);
mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->stackmap_data(), _move_ro);
}
......
......@@ -140,14 +140,15 @@ klassOop oopFactory::new_instanceKlass(Symbol* name, int vtable_len, int itable_
constMethodOop oopFactory::new_constMethod(int byte_code_size,
int compressed_line_number_size,
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS) {
klassOop cmkObj = Universe::constMethodKlassObj();
constMethodKlass* cmk = constMethodKlass::cast(cmkObj);
return cmk->allocate(byte_code_size, compressed_line_number_size,
localvariable_table_length, checked_exceptions_length,
is_conc_safe,
localvariable_table_length, exception_table_length,
checked_exceptions_length, is_conc_safe,
CHECK_NULL);
}
......@@ -155,6 +156,7 @@ constMethodOop oopFactory::new_constMethod(int byte_code_size,
methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags,
int compressed_line_number_size,
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS) {
......@@ -164,6 +166,7 @@ methodOop oopFactory::new_method(int byte_code_size, AccessFlags access_flags,
constMethodOop cm = new_constMethod(byte_code_size,
compressed_line_number_size,
localvariable_table_length,
exception_table_length,
checked_exceptions_length,
is_conc_safe, CHECK_NULL);
constMethodHandle rw(THREAD, cm);
......
......@@ -86,6 +86,7 @@ private:
static constMethodOop new_constMethod(int byte_code_size,
int compressed_line_number_size,
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS);
......@@ -97,6 +98,7 @@ public:
AccessFlags access_flags,
int compressed_line_number_size,
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS);
......
......@@ -65,6 +65,7 @@ bool constMethodKlass::oop_is_conc_safe(oop obj) const {
constMethodOop constMethodKlass::allocate(int byte_code_size,
int compressed_line_number_size,
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS) {
......@@ -72,6 +73,7 @@ constMethodOop constMethodKlass::allocate(int byte_code_size,
int size = constMethodOopDesc::object_size(byte_code_size,
compressed_line_number_size,
localvariable_table_length,
exception_table_length,
checked_exceptions_length);
KlassHandle h_k(THREAD, as_klassOop());
constMethodOop cm = (constMethodOop)
......@@ -82,12 +84,12 @@ constMethodOop constMethodKlass::allocate(int byte_code_size,
cm->init_fingerprint();
cm->set_constants(NULL);
cm->set_stackmap_data(NULL);
cm->set_exception_table(NULL);
cm->set_code_size(byte_code_size);
cm->set_constMethod_size(size);
cm->set_inlined_tables_length(checked_exceptions_length,
compressed_line_number_size,
localvariable_table_length);
localvariable_table_length,
exception_table_length);
assert(cm->size() == size, "wrong size for object");
cm->set_is_conc_safe(is_conc_safe);
cm->set_partially_loaded();
......@@ -100,7 +102,6 @@ void constMethodKlass::oop_follow_contents(oop obj) {
constMethodOop cm = constMethodOop(obj);
MarkSweep::mark_and_push(cm->adr_constants());
MarkSweep::mark_and_push(cm->adr_stackmap_data());
MarkSweep::mark_and_push(cm->adr_exception_table());
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::constMethodKlassObj never moves.
}
......@@ -112,7 +113,6 @@ void constMethodKlass::oop_follow_contents(ParCompactionManager* cm,
constMethodOop cm_oop = constMethodOop(obj);
PSParallelCompact::mark_and_push(cm, cm_oop->adr_constants());
PSParallelCompact::mark_and_push(cm, cm_oop->adr_stackmap_data());
PSParallelCompact::mark_and_push(cm, cm_oop->adr_exception_table());
// Performance tweak: We skip iterating over the klass pointer since we
// know that Universe::constMethodKlassObj never moves.
}
......@@ -123,7 +123,6 @@ int constMethodKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
constMethodOop cm = constMethodOop(obj);
blk->do_oop(cm->adr_constants());
blk->do_oop(cm->adr_stackmap_data());
blk->do_oop(cm->adr_exception_table());
// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call.
int size = cm->object_size();
......@@ -139,8 +138,6 @@ int constMethodKlass::oop_oop_iterate_m(oop obj, OopClosure* blk, MemRegion mr)
if (mr.contains(adr)) blk->do_oop(adr);
adr = cm->adr_stackmap_data();
if (mr.contains(adr)) blk->do_oop(adr);
adr = cm->adr_exception_table();
if (mr.contains(adr)) blk->do_oop(adr);
// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call.
int size = cm->object_size();
......@@ -155,7 +152,6 @@ int constMethodKlass::oop_adjust_pointers(oop obj) {
constMethodOop cm = constMethodOop(obj);
MarkSweep::adjust_pointer(cm->adr_constants());
MarkSweep::adjust_pointer(cm->adr_stackmap_data());
MarkSweep::adjust_pointer(cm->adr_exception_table());
// Get size before changing pointers.
// Don't call size() or oop_size() since that is a virtual call.
int size = cm->object_size();
......@@ -190,7 +186,6 @@ void constMethodKlass::oop_print_on(oop obj, outputStream* st) {
constMethodOop m = constMethodOop(obj);
st->print(" - constants: " INTPTR_FORMAT " ", (address)m->constants());
m->constants()->print_value_on(st); st->cr();
st->print(" - exceptions: " INTPTR_FORMAT "\n", (address)m->exception_table());
if (m->has_stackmap_table()) {
st->print(" - stackmap data: ");
m->stackmap_data()->print_value_on(st);
......@@ -228,8 +223,6 @@ void constMethodKlass::oop_verify_on(oop obj, outputStream* st) {
typeArrayOop stackmap_data = m->stackmap_data();
guarantee(stackmap_data == NULL ||
stackmap_data->is_perm(), "should be in permspace");
guarantee(m->exception_table()->is_perm(), "should be in permspace");
guarantee(m->exception_table()->is_typeArray(), "should be type array");
address m_end = (address)((oop*) m + m->size());
address compressed_table_start = m->code_end();
......@@ -244,11 +237,15 @@ void constMethodKlass::oop_verify_on(oop obj, outputStream* st) {
compressed_table_end += stream.position();
}
guarantee(compressed_table_end <= m_end, "invalid method layout");
// Verify checked exceptions and local variable tables
// Verify checked exceptions, exception table and local variable tables
if (m->has_checked_exceptions()) {
u2* addr = m->checked_exceptions_length_addr();
guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout");
}
if (m->has_exception_handler()) {
u2* addr = m->exception_table_length_addr();
guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout");
}
if (m->has_localvariable_table()) {
u2* addr = m->localvariable_table_length_addr();
guarantee(*addr > 0 && (address) addr >= compressed_table_end && (address) addr < m_end, "invalid method layout");
......@@ -257,13 +254,13 @@ void constMethodKlass::oop_verify_on(oop obj, outputStream* st) {
u2* uncompressed_table_start;
if (m->has_localvariable_table()) {
uncompressed_table_start = (u2*) m->localvariable_table_start();
} else {
if (m->has_checked_exceptions()) {
} else if (m->has_exception_handler()) {
uncompressed_table_start = (u2*) m->exception_table_start();
} else if (m->has_checked_exceptions()) {
uncompressed_table_start = (u2*) m->checked_exceptions_start();
} else {
uncompressed_table_start = (u2*) m_end;
}
}
int gap = (intptr_t) uncompressed_table_start - (intptr_t) compressed_table_end;
int max_gap = align_object_size(1)*BytesPerWord;
guarantee(gap >= 0 && gap < max_gap, "invalid method layout");
......@@ -273,8 +270,8 @@ void constMethodKlass::oop_verify_on(oop obj, outputStream* st) {
bool constMethodKlass::oop_partially_loaded(oop obj) const {
assert(obj->is_constMethod(), "object must be klass");
constMethodOop m = constMethodOop(obj);
// check whether exception_table points to self (flag for partially loaded)
return m->exception_table() == (typeArrayOop)obj;
// check whether stackmap_data points to self (flag for partially loaded)
return m->stackmap_data() == (typeArrayOop)obj;
}
......@@ -282,6 +279,6 @@ bool constMethodKlass::oop_partially_loaded(oop obj) const {
void constMethodKlass::oop_set_partially_loaded(oop obj) {
assert(obj->is_constMethod(), "object must be klass");
constMethodOop m = constMethodOop(obj);
// Temporarily set exception_table to point to self
m->set_exception_table((typeArrayOop)obj);
// Temporarily set stackmap_data to point to self
m->set_stackmap_data((typeArrayOop)obj);
}
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -46,6 +46,7 @@ public:
DEFINE_ALLOCATE_PERMANENT(constMethodKlass);
constMethodOop allocate(int byte_code_size, int compressed_line_number_size,
int localvariable_table_length,
int exception_table_length,
int checked_exceptions_length,
bool is_conc_safe,
TRAPS);
......
......@@ -35,6 +35,7 @@ const u2 constMethodOopDesc::UNSET_IDNUM = 0xFFFF;
int constMethodOopDesc::object_size(int code_size,
int compressed_line_number_size,
int local_variable_table_length,
int exception_table_length,
int checked_exceptions_length) {
int extra_bytes = code_size;
if (compressed_line_number_size > 0) {
......@@ -49,6 +50,10 @@ int constMethodOopDesc::object_size(int code_size,
extra_bytes +=
local_variable_table_length * sizeof(LocalVariableTableElement);
}
if (exception_table_length > 0) {
extra_bytes += sizeof(u2);
extra_bytes += exception_table_length * sizeof(ExceptionTableElement);
}
int extra_words = align_size_up(extra_bytes, BytesPerWord) / BytesPerWord;
return align_object_size(header_size() + extra_words);
}
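As a worked example of the new term (assuming u2 is 2 bytes and ExceptionTableElement packs four u2 fields, i.e. 8 bytes): an exception table with 3 entries adds sizeof(u2) + 3 * sizeof(ExceptionTableElement) = 2 + 24 = 26 extra bytes, which are then rounded up to whole words together with the other inlined tables.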
......@@ -73,8 +78,23 @@ u2* constMethodOopDesc::checked_exceptions_length_addr() const {
return last_u2_element();
}
u2* constMethodOopDesc::exception_table_length_addr() const {
assert(has_exception_handler(), "called only if table is present");
if (has_checked_exceptions()) {
// If checked_exception present, locate immediately before them.
return (u2*) checked_exceptions_start() - 1;
} else {
// Else, the exception table is at the end of the constMethod.
return last_u2_element();
}
}
u2* constMethodOopDesc::localvariable_table_length_addr() const {
assert(has_localvariable_table(), "called only if table is present");
if (has_exception_handler()) {
// If exception_table present, locate immediately before them.
return (u2*) exception_table_start() - 1;
} else {
if (has_checked_exceptions()) {
// If checked_exception present, locate immediately before them.
return (u2*) checked_exceptions_start() - 1;
......@@ -82,6 +102,7 @@ u2* constMethodOopDesc::localvariable_table_length_addr() const {
// Else, the linenumber table is at the end of the constMethod.
return last_u2_element();
}
}
}
......@@ -89,7 +110,8 @@ u2* constMethodOopDesc::localvariable_table_length_addr() const {
void constMethodOopDesc::set_inlined_tables_length(
int checked_exceptions_len,
int compressed_line_number_size,
int localvariable_table_len) {
int localvariable_table_len,
int exception_table_len) {
// Must be done in the order below, otherwise length_addr accessors
// will not work. Only set bit in header if length is positive.
assert(_flags == 0, "Error");
......@@ -100,6 +122,10 @@ void constMethodOopDesc::set_inlined_tables_length(
_flags |= _has_checked_exceptions;
*(checked_exceptions_length_addr()) = checked_exceptions_len;
}
if (exception_table_len > 0) {
_flags |= _has_exception_table;
*(exception_table_length_addr()) = exception_table_len;
}
if (localvariable_table_len > 0) {
_flags |= _has_localvariable_table;
*(localvariable_table_length_addr()) = localvariable_table_len;
......@@ -133,3 +159,15 @@ LocalVariableTableElement* constMethodOopDesc::localvariable_table_start() const
addr -= length * sizeof(LocalVariableTableElement) / sizeof(u2);
return (LocalVariableTableElement*) addr;
}
int constMethodOopDesc::exception_table_length() const {
return has_exception_handler() ? *(exception_table_length_addr()) : 0;
}
ExceptionTableElement* constMethodOopDesc::exception_table_start() const {
u2* addr = exception_table_length_addr();
u2 length = *addr;
assert(length > 0, "should only be called if table is present");
addr -= length * sizeof(ExceptionTableElement) / sizeof(u2);
return (ExceptionTableElement*)addr;
}
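Taken together, the accessors above let callers walk the inlined table without the old typeArrayOop indirection. A hedged sketch follows; the function name is an illustrative assumption, and it uses only exception_table_length(), exception_table_start() and the outputStream printing interface:
// Illustration only: dump every handler range recorded for a constMethodOop.
static void print_exception_ranges(constMethodOopDesc* cm, outputStream* st) {
  int len = cm->exception_table_length();        // 0 when the method has no handlers
  if (len == 0) return;
  ExceptionTableElement* table = cm->exception_table_start();
  for (int i = 0; i < len; i++) {
    st->print_cr("[%d - %d) -> %d (catch_type index %d)",
                 table[i].start_pc, table[i].end_pc,
                 table[i].handler_pc, table[i].catch_type_index);
  }
}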
......@@ -43,7 +43,6 @@
// | fingerprint 2 |
// | constants (oop) |
// | stackmap_data (oop) |
// | exception_table (oop) |
// | constMethod_size |
// | interp_kind | flags | code_size |
// | name index | signature index |
......@@ -64,7 +63,13 @@
// | (length is u2, elements are 6-tuples of u2) |
// | (see class LocalVariableTableElement) |
// | (access flags bit tells whether table is present) |
// | (indexed from end of contMethodOop) |
// | (indexed from end of constMethodOop) |
// |------------------------------------------------------|
// | exception table + length (length last) |
// | (length is u2, elements are 4-tuples of u2) |
// | (see class ExceptionTableElement) |
// | (access flags bit tells whether table is present) |
// | (indexed from end of constMethodOop) |
// |------------------------------------------------------|
// | checked exceptions elements + length (length last) |
// | (length is u2, elements are u2) |
......@@ -93,6 +98,15 @@ class LocalVariableTableElement VALUE_OBJ_CLASS_SPEC {
};
// Utility class describing elements in the exception table
class ExceptionTableElement VALUE_OBJ_CLASS_SPEC {
public:
u2 start_pc;
u2 end_pc;
u2 handler_pc;
u2 catch_type_index;
};
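For instance, a single Java try/catch block maps onto one such 4-tuple; a hedged illustration with assumed bytecode offsets and constant-pool index:
// Illustration only: roughly what javac would emit for
//   try { work(); } catch (java.io.IOException e) { handle(e); }
// assuming the try body covers bytecodes [0, 8), the handler starts at 11,
// and constant-pool slot 5 holds Class java/io/IOException.
ExceptionTableElement entry = { 0 /* start_pc */, 8 /* end_pc */,
                                11 /* handler_pc */, 5 /* catch_type_index */ };
// A catch_type_index of 0 means "catch everything", as used for finally blocks.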
class constMethodOopDesc : public oopDesc {
friend class constMethodKlass;
friend class VMStructs;
......@@ -100,7 +114,8 @@ private:
enum {
_has_linenumber_table = 1,
_has_checked_exceptions = 2,
_has_localvariable_table = 4
_has_localvariable_table = 4,
_has_exception_table = 8
};
// Bit vector of signature
......@@ -114,7 +129,7 @@ private:
public:
oop* oop_block_beg() const { return adr_constants(); }
oop* oop_block_end() const { return adr_exception_table() + 1; }
oop* oop_block_end() const { return adr_stackmap_data() + 1; }
private:
//
......@@ -126,11 +141,6 @@ private:
// Raw stackmap data for the method
typeArrayOop _stackmap_data;
// The exception handler table. 4-tuples of ints [start_pc, end_pc,
// handler_pc, catch_type index] For methods with no exceptions the
// table is pointing to Universe::the_empty_int_array
typeArrayOop _exception_table;
//
// End of the oop block.
//
......@@ -152,7 +162,8 @@ public:
// Inlined tables
void set_inlined_tables_length(int checked_exceptions_len,
int compressed_line_number_size,
int localvariable_table_len);
int localvariable_table_len,
int exception_table_len);
bool has_linenumber_table() const
{ return (_flags & _has_linenumber_table) != 0; }
......@@ -163,6 +174,9 @@ public:
bool has_localvariable_table() const
{ return (_flags & _has_localvariable_table) != 0; }
bool has_exception_handler() const
{ return (_flags & _has_exception_table) != 0; }
void set_interpreter_kind(int kind) { _interpreter_kind = kind; }
int interpreter_kind(void) const { return _interpreter_kind; }
......@@ -181,11 +195,6 @@ public:
}
bool has_stackmap_table() const { return _stackmap_data != NULL; }
// exception handler table
typeArrayOop exception_table() const { return _exception_table; }
void set_exception_table(typeArrayOop e) { oop_store_without_check((oop*) &_exception_table, (oop) e); }
bool has_exception_handler() const { return exception_table() != NULL && exception_table()->length() > 0; }
void init_fingerprint() {
const uint64_t initval = CONST64(0x8000000000000000);
_fingerprint = initval;
......@@ -235,6 +244,7 @@ public:
// Object size needed
static int object_size(int code_size, int compressed_line_number_size,
int local_variable_table_length,
int exception_table_length,
int checked_exceptions_length);
int object_size() const { return _constMethod_size; }
......@@ -256,6 +266,7 @@ public:
u_char* compressed_linenumber_table() const; // not preserved by gc
u2* checked_exceptions_length_addr() const;
u2* localvariable_table_length_addr() const;
u2* exception_table_length_addr() const;
// checked exceptions
int checked_exceptions_length() const;
......@@ -265,6 +276,10 @@ public:
int localvariable_table_length() const;
LocalVariableTableElement* localvariable_table_start() const;
// exception table
int exception_table_length() const;
ExceptionTableElement* exception_table_start() const;
// byte codes
void set_code(address code) {
if (code_size() > 0) {
......@@ -282,13 +297,10 @@ public:
// interpreter support
static ByteSize constants_offset()
{ return byte_offset_of(constMethodOopDesc, _constants); }
static ByteSize exception_table_offset()
{ return byte_offset_of(constMethodOopDesc, _exception_table); }
// Garbage collection support
oop* adr_constants() const { return (oop*)&_constants; }
oop* adr_stackmap_data() const { return (oop*)&_stackmap_data; }
oop* adr_exception_table() const { return (oop*)&_exception_table; }
bool is_conc_safe() { return _is_conc_safe; }
void set_is_conc_safe(bool v) { _is_conc_safe = v; }
......
/*
* Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -400,10 +400,9 @@ void GenerateOopMap::mark_bbheaders_and_count_gc_points() {
bool fellThrough = false; // False to get first BB marked.
// First mark all exception handlers as start of a basic-block
typeArrayOop excps = method()->exception_table();
for(int i = 0; i < excps->length(); i += 4) {
int handler_pc_idx = i+2;
bb_mark_fct(this, excps->int_at(handler_pc_idx), NULL);
ExceptionTable excps(method());
for(int i = 0; i < excps.length(); i ++) {
bb_mark_fct(this, excps.handler_pc(i), NULL);
}
// Then iterate through the code
......@@ -450,10 +449,9 @@ void GenerateOopMap::mark_reachable_code() {
// Mark entry basic block as alive and all exception handlers
_basic_blocks[0].mark_as_alive();
typeArrayOop excps = method()->exception_table();
for(int i = 0; i < excps->length(); i += 4) {
int handler_pc_idx = i+2;
BasicBlock *bb = get_basic_block_at(excps->int_at(handler_pc_idx));
ExceptionTable excps(method());
for(int i = 0; i < excps.length(); i++) {
BasicBlock *bb = get_basic_block_at(excps.handler_pc(i));
// If block is not already alive (due to multiple exception handlers to same bb), then
// make it alive
if (bb->is_dead()) bb->mark_as_alive();
......@@ -1181,12 +1179,12 @@ void GenerateOopMap::do_exception_edge(BytecodeStream* itr) {
if (_has_exceptions) {
int bci = itr->bci();
typeArrayOop exct = method()->exception_table();
for(int i = 0; i< exct->length(); i+=4) {
int start_pc = exct->int_at(i);
int end_pc = exct->int_at(i+1);
int handler_pc = exct->int_at(i+2);
int catch_type = exct->int_at(i+3);
ExceptionTable exct(method());
for(int i = 0; i< exct.length(); i++) {
int start_pc = exct.start_pc(i);
int end_pc = exct.end_pc(i);
int handler_pc = exct.handler_pc(i);
int catch_type = exct.catch_type_index(i);
if (start_pc <= bci && bci < end_pc) {
BasicBlock *excBB = get_basic_block_at(handler_pc);
......@@ -2055,7 +2053,7 @@ void GenerateOopMap::compute_map(TRAPS) {
_conflict = false;
_max_locals = method()->max_locals();
_max_stack = method()->max_stack();
_has_exceptions = (method()->exception_table()->length() > 0);
_has_exceptions = (method()->has_exception_handler());
_nof_refval_conflicts = 0;
_init_vars = new GrowableArray<intptr_t>(5); // There are seldom more than 5 init_vars
_report_result = false;
......@@ -2070,9 +2068,10 @@ void GenerateOopMap::compute_map(TRAPS) {
if (Verbose) {
_method->print_codes();
tty->print_cr("Exception table:");
typeArrayOop excps = method()->exception_table();
for(int i = 0; i < excps->length(); i += 4) {
tty->print_cr("[%d - %d] -> %d", excps->int_at(i + 0), excps->int_at(i + 1), excps->int_at(i + 2));
ExceptionTable excps(method());
for(int i = 0; i < excps.length(); i ++) {
tty->print_cr("[%d - %d] -> %d",
excps.start_pc(i), excps.end_pc(i), excps.handler_pc(i));
}
}
}
......
......@@ -111,25 +111,21 @@ char* methodOopDesc::name_and_sig_as_C_string(Klass* klass, Symbol* method_name,
int methodOopDesc::fast_exception_handler_bci_for(KlassHandle ex_klass, int throw_bci, TRAPS) {
// exception table holds quadruple entries of the form (beg_bci, end_bci, handler_bci, klass_index)
const int beg_bci_offset = 0;
const int end_bci_offset = 1;
const int handler_bci_offset = 2;
const int klass_index_offset = 3;
const int entry_size = 4;
// access exception table
typeArrayHandle table (THREAD, constMethod()->exception_table());
int length = table->length();
assert(length % entry_size == 0, "exception table format has changed");
ExceptionTable table(this);
int length = table.length();
// iterate through all entries sequentially
constantPoolHandle pool(THREAD, constants());
for (int i = 0; i < length; i += entry_size) {
int beg_bci = table->int_at(i + beg_bci_offset);
int end_bci = table->int_at(i + end_bci_offset);
for (int i = 0; i < length; i ++) {
// Reacquire the table in case a GC happened
ExceptionTable table(this);
int beg_bci = table.start_pc(i);
int end_bci = table.end_pc(i);
assert(beg_bci <= end_bci, "inconsistent exception table");
if (beg_bci <= throw_bci && throw_bci < end_bci) {
// exception handler bci range covers throw_bci => investigate further
int handler_bci = table->int_at(i + handler_bci_offset);
int klass_index = table->int_at(i + klass_index_offset);
int handler_bci = table.handler_pc(i);
int klass_index = table.catch_type_index(i);
if (klass_index == 0) {
return handler_bci;
} else if (ex_klass.is_null()) {
......@@ -980,7 +976,7 @@ methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
{
int flags_bits = (JVM_MH_INVOKE_BITS | JVM_ACC_PUBLIC | JVM_ACC_FINAL);
methodOop m_oop = oopFactory::new_method(0, accessFlags_from(flags_bits),
0, 0, 0, IsSafeConc, CHECK_(empty));
0, 0, 0, 0, IsSafeConc, CHECK_(empty));
m = methodHandle(THREAD, m_oop);
}
m->set_constants(cp());
......@@ -994,7 +990,6 @@ methodHandle methodOopDesc::make_invoke_method(KlassHandle holder,
m->set_result_index(rtf.type());
#endif
m->compute_size_of_parameters(THREAD);
m->set_exception_table(Universe::the_empty_int_array());
m->init_intrinsic_id();
assert(m->intrinsic_id() == vmIntrinsics::_invokeExact ||
m->intrinsic_id() == vmIntrinsics::_invokeGeneric, "must be an invoker");
......@@ -1038,6 +1033,7 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
AccessFlags flags = m->access_flags();
int checked_exceptions_len = m->checked_exceptions_length();
int localvariable_len = m->localvariable_table_length();
int exception_table_len = m->exception_table_length();
// Allocate newm_oop with the is_conc_safe parameter set
// to IsUnsafeConc to indicate that newm_oop is not yet
// safe for concurrent processing by a GC.
......@@ -1045,6 +1041,7 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
flags,
new_compressed_linenumber_size,
localvariable_len,
exception_table_len,
checked_exceptions_len,
IsUnsafeConc,
CHECK_(methodHandle()));
......@@ -1085,6 +1082,7 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
newm->set_method_size(new_method_size);
assert(newm->code_size() == new_code_length, "check");
assert(newm->checked_exceptions_length() == checked_exceptions_len, "check");
assert(newm->exception_table_length() == exception_table_len, "check");
assert(newm->localvariable_table_length() == localvariable_len, "check");
// Copy new byte codes
memcpy(newm->code_base(), new_code, new_code_length);
......@@ -1100,6 +1098,12 @@ methodHandle methodOopDesc:: clone_with_new_data(methodHandle m, u_char* new_cod
m->checked_exceptions_start(),
checked_exceptions_len * sizeof(CheckedExceptionElement));
}
// Copy exception table
if (exception_table_len > 0) {
memcpy(newm->exception_table_start(),
m->exception_table_start(),
exception_table_len * sizeof(ExceptionTableElement));
}
// Copy local variable number table
if (localvariable_len > 0) {
memcpy(newm->localvariable_table_start(),
......