Commit 054034d1 authored by amurillo

Merge

......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=40
HS_BUILD_NUMBER=11
HS_BUILD_NUMBER=12
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -1128,51 +1128,82 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
// Hoist any int/ptr/long's in the first 6 to int regs.
// Hoist any flt/dbl's in the first 16 dbl regs.
int j = 0; // Count of actual args, not HALVES
for( int i=0; i<total_args_passed; i++, j++ ) {
switch( sig_bt[i] ) {
VMRegPair param_array_reg; // location of the argument in the parameter array
for (int i = 0; i < total_args_passed; i++, j++) {
param_array_reg.set_bad();
switch (sig_bt[i]) {
case T_BOOLEAN:
case T_BYTE:
case T_CHAR:
case T_INT:
case T_SHORT:
regs[i].set1( int_stk_helper( j ) ); break;
regs[i].set1(int_stk_helper(j));
break;
case T_LONG:
assert( sig_bt[i+1] == T_VOID, "expecting half" );
assert(sig_bt[i+1] == T_VOID, "expecting half");
case T_ADDRESS: // raw pointers, like current thread, for VM calls
case T_ARRAY:
case T_OBJECT:
case T_METADATA:
regs[i].set2( int_stk_helper( j ) );
regs[i].set2(int_stk_helper(j));
break;
case T_FLOAT:
if ( j < 16 ) {
// V9ism: floats go in ODD registers
regs[i].set1(as_FloatRegister(1 + (j<<1))->as_VMReg());
} else {
// V9ism: floats go in ODD stack slot
regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1)));
// Per SPARC Compliance Definition 2.4.1, page 3P-12 available here
// http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz
//
// "When a callee prototype exists, and does not indicate variable arguments,
// floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248
// will be promoted to floating-point registers"
//
// By "promoted" it means that the argument is located in two places, an unused
// spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live
// float register. In most cases, there are 6 or fewer arguments of any type,
// and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive)
// serve as shadow slots. Per the spec floating point registers %d6 to %d16
// require slots beyond that (up to %sp+BIAS+248).
//
{
// V9ism: floats go in ODD registers and stack slots
int float_index = 1 + (j << 1);
param_array_reg.set1(VMRegImpl::stack2reg(float_index));
if (j < 16) {
regs[i].set1(as_FloatRegister(float_index)->as_VMReg());
} else {
regs[i] = param_array_reg;
}
}
break;
case T_DOUBLE:
assert( sig_bt[i+1] == T_VOID, "expecting half" );
if ( j < 16 ) {
// V9ism: doubles go in EVEN/ODD regs
regs[i].set2(as_FloatRegister(j<<1)->as_VMReg());
} else {
// V9ism: doubles go in EVEN/ODD stack slots
regs[i].set2(VMRegImpl::stack2reg(j<<1));
{
assert(sig_bt[i + 1] == T_VOID, "expecting half");
// V9ism: doubles go in EVEN/ODD regs and stack slots
int double_index = (j << 1);
param_array_reg.set2(VMRegImpl::stack2reg(double_index));
if (j < 16) {
regs[i].set2(as_FloatRegister(double_index)->as_VMReg());
} else {
// V9ism: doubles go in EVEN/ODD stack slots
regs[i] = param_array_reg;
}
}
break;
case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES
case T_VOID:
regs[i].set_bad();
j--;
break; // Do not count HALVES
default:
ShouldNotReachHere();
}
if (regs[i].first()->is_stack()) {
int off = regs[i].first()->reg2stack();
// Keep track of the deepest parameter array slot.
if (!param_array_reg.first()->is_valid()) {
param_array_reg = regs[i];
}
if (param_array_reg.first()->is_stack()) {
int off = param_array_reg.first()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
if (regs[i].second()->is_stack()) {
int off = regs[i].second()->reg2stack();
if (param_array_reg.second()->is_stack()) {
int off = param_array_reg.second()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
}
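The SCD comment above boils down to simple slot arithmetic. A minimal standalone sketch (plain C++, not HotSpot code) reproduces it; the register cutoff of 16 and the odd/even slot choice are taken directly from the hunk above, the printout is purely illustrative:

#include <cstdio>

int main() {
  // For C argument number j (halves of longs/doubles not counted):
  //   float  -> odd parameter-array slot 1 + 2*j, and an FP register while j < 16
  //   double -> even/odd slot pair starting at 2*j, and an FP register pair while j < 16
  for (int j = 0; j < 20; j++) {
    int float_slot  = 1 + (j << 1);
    int double_slot = (j << 1);
    const char* where = (j < 16) ? "also promoted to an FP register" : "parameter array only";
    std::printf("arg %2d: float slot %2d, double slot %2d (%s)\n",
                j, float_slot, double_slot, where);
  }
  return 0;
}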
......@@ -1180,8 +1211,8 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
#else // _LP64
// V8 convention: first 6 things in O-regs, rest on stack.
// Alignment is willy-nilly.
for( int i=0; i<total_args_passed; i++ ) {
switch( sig_bt[i] ) {
for (int i = 0; i < total_args_passed; i++) {
switch (sig_bt[i]) {
case T_ADDRESS: // raw pointers, like current thread, for VM calls
case T_ARRAY:
case T_BOOLEAN:
......@@ -1192,23 +1223,23 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
case T_OBJECT:
case T_METADATA:
case T_SHORT:
regs[i].set1( int_stk_helper( i ) );
regs[i].set1(int_stk_helper(i));
break;
case T_DOUBLE:
case T_LONG:
assert( sig_bt[i+1] == T_VOID, "expecting half" );
regs[i].set_pair( int_stk_helper( i+1 ), int_stk_helper( i ) );
assert(sig_bt[i + 1] == T_VOID, "expecting half");
regs[i].set_pair(int_stk_helper(i + 1), int_stk_helper(i));
break;
case T_VOID: regs[i].set_bad(); break;
default:
ShouldNotReachHere();
}
if (regs[i].first()->is_stack()) {
int off = regs[i].first()->reg2stack();
int off = regs[i].first()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
if (regs[i].second()->is_stack()) {
int off = regs[i].second()->reg2stack();
int off = regs[i].second()->reg2stack();
if (off > max_stack_slots) max_stack_slots = off;
}
}
......@@ -1357,11 +1388,10 @@ static void object_move(MacroAssembler* masm,
const Register rOop = src.first()->as_Register();
const Register rHandle = L5;
int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset;
int offset = oop_slot*VMRegImpl::stack_slot_size;
Label skip;
int offset = oop_slot * VMRegImpl::stack_slot_size;
__ st_ptr(rOop, SP, offset + STACK_BIAS);
if (is_receiver) {
*receiver_offset = oop_slot * VMRegImpl::stack_slot_size;
*receiver_offset = offset;
}
map->set_oop(VMRegImpl::stack2reg(oop_slot));
__ add(SP, offset + STACK_BIAS, rHandle);
......
......@@ -1989,7 +1989,7 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) {
// to implement the UseStrictFP mode.
const bool Matcher::strict_fp_requires_explicit_rounding = false;
// Are floats conerted to double when stored to stack during deoptimization?
// Are floats converted to double when stored to stack during deoptimization?
// Sparc does not handle callee-save floats.
bool Matcher::float_in_double() { return false; }
......@@ -3218,7 +3218,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r
// are owned by the CALLEE. Holes should not be nessecary in the
// incoming area, as the Java calling convention is completely under
// the control of the AD file. Doubles can be sorted and packed to
// avoid holes. Holes in the outgoing arguments may be nessecary for
// avoid holes. Holes in the outgoing arguments may be necessary for
// varargs C calling conventions.
// Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is
// even aligned with pad0 as needed.
......@@ -3284,7 +3284,7 @@ frame %{
%}
// Body of function which returns an OptoRegs array locating
// arguments either in registers or in stack slots for callin
// arguments either in registers or in stack slots for calling
// C.
c_calling_convention %{
// This is obviously always outgoing
......
......@@ -327,7 +327,7 @@ void Canonicalizer::do_ShiftOp (ShiftOp* x) {
if (t2->is_constant()) {
switch (t2->tag()) {
case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
case longTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return;
case longTag : if (t2->as_LongConstant()->value() == (jlong)0) set_canonical(x->x()); return;
default : ShouldNotReachHere();
}
}
......@@ -808,28 +808,41 @@ void Canonicalizer::do_ExceptionObject(ExceptionObject* x) {}
static bool match_index_and_scale(Instruction* instr,
Instruction** index,
int* log2_scale,
Instruction** instr_to_unpin) {
*instr_to_unpin = NULL;
// Skip conversion ops
int* log2_scale) {
// Skip conversion ops. This works only on 32bit because of the implicit l2i that the
// unsafe performs.
#ifndef _LP64
Convert* convert = instr->as_Convert();
if (convert != NULL) {
if (convert != NULL && convert->op() == Bytecodes::_i2l) {
assert(convert->value()->type() == intType, "invalid input type");
instr = convert->value();
}
#endif
ShiftOp* shift = instr->as_ShiftOp();
if (shift != NULL) {
if (shift->is_pinned()) {
*instr_to_unpin = shift;
if (shift->op() == Bytecodes::_lshl) {
assert(shift->x()->type() == longType, "invalid input type");
} else {
#ifndef _LP64
if (shift->op() == Bytecodes::_ishl) {
assert(shift->x()->type() == intType, "invalid input type");
} else {
return false;
}
#else
return false;
#endif
}
// Constant shift value?
Constant* con = shift->y()->as_Constant();
if (con == NULL) return false;
// Well-known type and value?
IntConstant* val = con->type()->as_IntConstant();
if (val == NULL) return false;
if (shift->x()->type() != intType) return false;
assert(val != NULL, "Should be an int constant");
*index = shift->x();
int tmp_scale = val->value();
if (tmp_scale >= 0 && tmp_scale < 4) {
......@@ -842,31 +855,42 @@ static bool match_index_and_scale(Instruction* instr,
ArithmeticOp* arith = instr->as_ArithmeticOp();
if (arith != NULL) {
if (arith->is_pinned()) {
*instr_to_unpin = arith;
// See if either arg is a known constant
Constant* con = arith->x()->as_Constant();
if (con != NULL) {
*index = arith->y();
} else {
con = arith->y()->as_Constant();
if (con == NULL) return false;
*index = arith->x();
}
long const_value;
// Check for integer multiply
if (arith->op() == Bytecodes::_imul) {
// See if either arg is a known constant
Constant* con = arith->x()->as_Constant();
if (con != NULL) {
*index = arith->y();
if (arith->op() == Bytecodes::_lmul) {
assert((*index)->type() == longType, "invalid input type");
LongConstant* val = con->type()->as_LongConstant();
assert(val != NULL, "expecting a long constant");
const_value = val->value();
} else {
#ifndef _LP64
if (arith->op() == Bytecodes::_imul) {
assert((*index)->type() == intType, "invalid input type");
IntConstant* val = con->type()->as_IntConstant();
assert(val != NULL, "expecting an int constant");
const_value = val->value();
} else {
con = arith->y()->as_Constant();
if (con == NULL) return false;
*index = arith->x();
}
if ((*index)->type() != intType) return false;
// Well-known type and value?
IntConstant* val = con->type()->as_IntConstant();
if (val == NULL) return false;
switch (val->value()) {
case 1: *log2_scale = 0; return true;
case 2: *log2_scale = 1; return true;
case 4: *log2_scale = 2; return true;
case 8: *log2_scale = 3; return true;
default: return false;
return false;
}
#else
return false;
#endif
}
switch (const_value) {
case 1: *log2_scale = 0; return true;
case 2: *log2_scale = 1; return true;
case 4: *log2_scale = 2; return true;
case 8: *log2_scale = 3; return true;
default: return false;
}
}
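The only multipliers the matcher above can fold into an address scale are 1, 2, 4 and 8. A standalone sketch of that mapping (plain C++, not HotSpot code; scale_to_log2 is a made-up name for illustration):

#include <cstdio>

static bool scale_to_log2(long mul, int* log2_scale) {
  // Mirrors the switch above: powers of two up to 8 become a shift amount.
  switch (mul) {
    case 1: *log2_scale = 0; return true;
    case 2: *log2_scale = 1; return true;
    case 4: *log2_scale = 2; return true;
    case 8: *log2_scale = 3; return true;
    default: return false;
  }
}

int main() {
  const long muls[] = {1, 2, 3, 4, 8, 16};
  for (long m : muls) {
    int s = 0;
    if (scale_to_log2(m, &s)) {
      std::printf("index * %ld folds into a scaled address (log2_scale = %d)\n", m, s);
    } else {
      std::printf("index * %ld needs an explicit multiply\n", m);
    }
  }
  return 0;
}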
......@@ -879,29 +903,37 @@ static bool match(UnsafeRawOp* x,
Instruction** base,
Instruction** index,
int* log2_scale) {
Instruction* instr_to_unpin = NULL;
ArithmeticOp* root = x->base()->as_ArithmeticOp();
if (root == NULL) return false;
// Limit ourselves to addition for now
if (root->op() != Bytecodes::_ladd) return false;
bool match_found = false;
// Try to find shift or scale op
if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) {
if (match_index_and_scale(root->y(), index, log2_scale)) {
*base = root->x();
} else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) {
match_found = true;
} else if (match_index_and_scale(root->x(), index, log2_scale)) {
*base = root->y();
} else if (root->y()->as_Convert() != NULL) {
match_found = true;
} else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) {
// Skipping i2l works only on 32bit because of the implicit l2i that the unsafe performs.
// 64bit needs a real sign-extending conversion.
Convert* convert = root->y()->as_Convert();
if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) {
if (convert->op() == Bytecodes::_i2l) {
assert(convert->value()->type() == intType, "should be an int");
// pick base and index, setting scale at 1
*base = root->x();
*index = convert->value();
*log2_scale = 0;
} else {
return false;
match_found = true;
}
} else {
// doesn't match any expected sequences
return false;
}
// The default solution
if (!match_found) {
*base = root->x();
*index = root->y();
*log2_scale = 0;
}
// If the value is pinned then it will be always be computed so
......
......@@ -2042,6 +2042,8 @@ void LIRGenerator::do_RoundFP(RoundFP* x) {
}
}
// Here UnsafeGetRaw may have x->base() and x->index() be int or long
// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit.
void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
LIRItem base(x->base(), this);
LIRItem idx(this);
......@@ -2056,50 +2058,73 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
int log2_scale = 0;
if (x->has_index()) {
assert(x->index()->type()->tag() == intTag, "should not find non-int index");
log2_scale = x->log2_scale();
}
assert(!x->has_index() || idx.value() == x->index(), "should match");
LIR_Opr base_op = base.result();
LIR_Opr index_op = idx.result();
#ifndef _LP64
if (x->base()->type()->tag() == longTag) {
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, base.result(), base_op);
} else {
assert(x->base()->type()->tag() == intTag, "must be");
}
if (x->has_index()) {
if (x->index()->type()->tag() == longTag) {
LIR_Opr long_index_op = index_op;
if (x->index()->type()->is_constant()) {
long_index_op = new_register(T_LONG);
__ move(index_op, long_index_op);
}
index_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, long_index_op, index_op);
} else {
assert(x->index()->type()->tag() == intTag, "must be");
}
}
// At this point base and index should be all ints.
assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
assert(!x->has_index() || index_op->type() == T_INT, "index should be an int");
#else
if (x->has_index()) {
if (x->index()->type()->tag() == intTag) {
if (!x->index()->type()->is_constant()) {
index_op = new_register(T_LONG);
__ convert(Bytecodes::_i2l, idx.result(), index_op);
}
} else {
assert(x->index()->type()->tag() == longTag, "must be");
if (x->index()->type()->is_constant()) {
index_op = new_register(T_LONG);
__ move(idx.result(), index_op);
}
}
}
// At this point base is a long non-constant
// Index is a long register or a int constant.
// We allow the constant to stay an int because that would allow us a more compact encoding by
// embedding an immediate offset in the address expression. If we have a long constant, we have to
// move it into a register first.
assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant");
assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) ||
(index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type");
#endif
BasicType dst_type = x->basic_type();
LIR_Opr index_op = idx.result();
LIR_Address* addr;
if (index_op->is_constant()) {
assert(log2_scale == 0, "must not have a scale");
assert(index_op->type() == T_INT, "only int constants supported");
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
} else {
#ifdef X86
#ifdef _LP64
if (!index_op->is_illegal() && index_op->type() == T_INT) {
LIR_Opr tmp = new_pointer_register();
__ convert(Bytecodes::_i2l, index_op, tmp);
index_op = tmp;
}
#endif
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#elif defined(ARM)
addr = generate_address(base_op, index_op, log2_scale, 0, dst_type);
#else
if (index_op->is_illegal() || log2_scale == 0) {
#ifdef _LP64
if (!index_op->is_illegal() && index_op->type() == T_INT) {
LIR_Opr tmp = new_pointer_register();
__ convert(Bytecodes::_i2l, index_op, tmp);
index_op = tmp;
}
#endif
addr = new LIR_Address(base_op, index_op, dst_type);
} else {
LIR_Opr tmp = new_pointer_register();
......@@ -2126,7 +2151,6 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
BasicType type = x->basic_type();
if (x->has_index()) {
assert(x->index()->type()->tag() == intTag, "should not find non-int index");
log2_scale = x->log2_scale();
}
......@@ -2149,38 +2173,39 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
set_no_result(x);
LIR_Opr base_op = base.result();
LIR_Opr index_op = idx.result();
#ifndef _LP64
if (x->base()->type()->tag() == longTag) {
base_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, base.result(), base_op);
} else {
assert(x->base()->type()->tag() == intTag, "must be");
}
if (x->has_index()) {
if (x->index()->type()->tag() == longTag) {
index_op = new_register(T_INT);
__ convert(Bytecodes::_l2i, idx.result(), index_op);
}
}
// At this point base and index should be all ints and not constants
assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int");
assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int");
#else
if (x->has_index()) {
if (x->index()->type()->tag() == intTag) {
index_op = new_register(T_LONG);
__ convert(Bytecodes::_i2l, idx.result(), index_op);
}
}
// At this point base and index are long and non-constant
assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long");
assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long");
#endif
LIR_Opr index_op = idx.result();
if (log2_scale != 0) {
// temporary fix (platform dependent code without shift on Intel would be better)
index_op = new_pointer_register();
#ifdef _LP64
if(idx.result()->type() == T_INT) {
__ convert(Bytecodes::_i2l, idx.result(), index_op);
} else {
#endif
// TODO: ARM also allows embedded shift in the address
__ move(idx.result(), index_op);
#ifdef _LP64
}
#endif
// TODO: ARM also allows embedded shift in the address
__ shift_left(index_op, log2_scale, index_op);
}
#ifdef _LP64
else if(!index_op->is_illegal() && index_op->type() == T_INT) {
LIR_Opr tmp = new_pointer_register();
__ convert(Bytecodes::_i2l, index_op, tmp);
index_op = tmp;
}
#endif
LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
__ move(value.result(), addr);
......
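Taken together, the two LIRGenerator hunks above normalize the raw base/index operands before the address is built: on 32-bit a long base or index is narrowed with l2i, while on 64-bit an int index is widened with i2l unless it is an int constant, which is deliberately kept so it can be folded into the address as an immediate offset (a long constant is instead moved into a register). A very simplified standalone model of the 64-bit index rule (plain C++, not LIR; OperandModel and normalize_index_lp64 are made-up names):

struct OperandModel { bool is_long; bool is_constant; };

// Mirrors the LP64 branch of do_UnsafeGetRaw() above.
static OperandModel normalize_index_lp64(OperandModel idx) {
  if (!idx.is_long) {
    if (!idx.is_constant) {
      idx.is_long = true;          // i2l into a fresh long register
    }                              // an int constant stays: usable as an immediate offset
  } else if (idx.is_constant) {
    idx.is_constant = false;       // a long constant must be materialized in a register
  }
  return idx;
}

int main() {
  OperandModel int_reg    = { false, false };
  OperandModel int_const  = { false, true  };
  OperandModel long_const = { true,  true  };
  OperandModel a = normalize_index_lp64(int_reg);     // -> long register
  OperandModel b = normalize_index_lp64(int_const);   // -> stays an int immediate
  OperandModel c = normalize_index_lp64(long_const);  // -> long register
  return (a.is_long && !a.is_constant &&
          !b.is_long && b.is_constant &&
          c.is_long && !c.is_constant) ? 0 : 1;
}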
......@@ -205,7 +205,7 @@ Symbol* SymbolTable::lookup(int index, const char* name,
}
}
// If the bucket size is too deep check if this hash code is insufficient.
if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
if (count >= rehash_count && !needs_rehashing()) {
_needs_rehashing = check_rehash_table(count);
}
return NULL;
......@@ -656,7 +656,7 @@ oop StringTable::lookup(int index, jchar* name,
}
}
// If the bucket size is too deep check if this hash code is insufficient.
if (count >= BasicHashtable<mtSymbol>::rehash_count && !needs_rehashing()) {
if (count >= rehash_count && !needs_rehashing()) {
_needs_rehashing = check_rehash_table(count);
}
return NULL;
......
......@@ -74,7 +74,7 @@ class TempNewSymbol : public StackObj {
operator Symbol*() { return _temp; }
};
class SymbolTable : public Hashtable<Symbol*, mtSymbol> {
class SymbolTable : public RehashableHashtable<Symbol*, mtSymbol> {
friend class VMStructs;
friend class ClassFileParser;
......@@ -110,10 +110,10 @@ private:
Symbol* lookup(int index, const char* name, int len, unsigned int hash);
SymbolTable()
: Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
: RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
: Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
: RehashableHashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
number_of_entries) {}
// Arena for permanent symbols (null class loader) that are never unloaded
......@@ -252,7 +252,7 @@ public:
static int parallel_claimed_index() { return _parallel_claimed_idx; }
};
class StringTable : public Hashtable<oop, mtSymbol> {
class StringTable : public RehashableHashtable<oop, mtSymbol> {
friend class VMStructs;
private:
......@@ -278,11 +278,11 @@ private:
// in the range [start_idx, end_idx).
static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed);
StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize,
StringTable() : RehashableHashtable<oop, mtSymbol>((int)StringTableSize,
sizeof (HashtableEntry<oop, mtSymbol>)) {}
StringTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
: Hashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
: RehashableHashtable<oop, mtSymbol>((int)StringTableSize, sizeof (HashtableEntry<oop, mtSymbol>), t,
number_of_entries) {}
public:
// The string table
......
......@@ -1175,6 +1175,12 @@ void CompileBroker::compile_method_base(methodHandle method,
return;
}
if (TieredCompilation) {
// Tiered policy requires MethodCounters to exist before adding a method to
// the queue. Create if we don't have them yet.
method->get_method_counters(thread);
}
// Outputs from the following MutexLocker block:
CompileTask* task = NULL;
bool blocking = false;
......
......@@ -26,222 +26,64 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
#include "memory/allocation.hpp"
#include "memory/freeList.hpp"
#include "runtime/globals.hpp"
class CodeBlobClosure;
// The elements of the G1CodeRootChunk is either:
// 1) nmethod pointers
// 2) nodes in an internally chained free list
typedef union {
nmethod* _nmethod;
void* _link;
} NmethodOrLink;
class G1CodeRootChunk : public CHeapObj<mtGC> {
private:
static const int NUM_ENTRIES = 32;
public:
G1CodeRootChunk* _next;
G1CodeRootChunk* _prev;
NmethodOrLink* _top;
// First free position within the chunk.
volatile NmethodOrLink* _free;
NmethodOrLink _data[NUM_ENTRIES];
NmethodOrLink* bottom() const {
return (NmethodOrLink*) &(_data[0]);
}
NmethodOrLink* end() const {
return (NmethodOrLink*) &(_data[NUM_ENTRIES]);
}
bool is_link(NmethodOrLink* nmethod_or_link) {
return nmethod_or_link->_link == NULL ||
(bottom() <= nmethod_or_link->_link
&& nmethod_or_link->_link < end());
}
bool is_nmethod(NmethodOrLink* nmethod_or_link) {
return !is_link(nmethod_or_link);
}
public:
G1CodeRootChunk();
~G1CodeRootChunk() {}
static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); }
// FreeList "interface" methods
G1CodeRootChunk* next() const { return _next; }
G1CodeRootChunk* prev() const { return _prev; }
void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");}
void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");}
void clear_next() { set_next(NULL); }
void clear_prev() { set_prev(NULL); }
size_t size() const { return word_size(); }
void link_next(G1CodeRootChunk* ptr) { set_next(ptr); }
void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); }
void link_after(G1CodeRootChunk* ptr) {
link_next(ptr);
if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this);
}
bool is_free() { return true; }
// New G1CodeRootChunk routines
void reset();
bool is_empty() const {
return _top == bottom();
}
bool is_full() const {
return _top == end() && _free == NULL;
}
bool contains(nmethod* method) {
NmethodOrLink* cur = bottom();
while (cur != _top) {
if (cur->_nmethod == method) return true;
cur++;
}
return false;
}
bool add(nmethod* method) {
if (is_full()) {
return false;
}
if (_free != NULL) {
// Take from internally chained free list
NmethodOrLink* first_free = (NmethodOrLink*)_free;
_free = (NmethodOrLink*)_free->_link;
first_free->_nmethod = method;
} else {
// Take from top.
_top->_nmethod = method;
_top++;
}
return true;
}
bool remove_lock_free(nmethod* method);
void nmethods_do(CodeBlobClosure* blk);
nmethod* pop() {
if (_free != NULL) {
// Kill the free list.
_free = NULL;
}
while (!is_empty()) {
_top--;
if (is_nmethod(_top)) {
return _top->_nmethod;
}
}
return NULL;
}
};
// Manages free chunks.
class G1CodeRootChunkManager VALUE_OBJ_CLASS_SPEC {
private:
// Global free chunk list management
FreeList<G1CodeRootChunk> _free_list;
// Total number of chunks handed out
size_t _num_chunks_handed_out;
public:
G1CodeRootChunkManager();
G1CodeRootChunk* new_chunk();
void free_chunk(G1CodeRootChunk* chunk);
// Free all elements of the given list.
void free_all_chunks(FreeList<G1CodeRootChunk>* list);
void initialize();
void purge_chunks(size_t keep_ratio);
static size_t static_mem_size();
size_t fl_mem_size();
#ifndef PRODUCT
size_t num_chunks_handed_out() const;
size_t num_free_chunks() const;
#endif
};
class CodeRootSetTable;
class HeapRegion;
class nmethod;
// Implements storage for a set of code roots.
// All methods that modify the set are not thread-safe except if otherwise noted.
class G1CodeRootSet VALUE_OBJ_CLASS_SPEC {
friend class G1CodeRootSetTest;
private:
// Global default free chunk manager instance.
static G1CodeRootChunkManager _default_chunk_manager;
G1CodeRootChunk* new_chunk() { return _manager->new_chunk(); }
void free_chunk(G1CodeRootChunk* chunk) { _manager->free_chunk(chunk); }
// Free all elements of the given list.
void free_all_chunks(FreeList<G1CodeRootChunk>* list) { _manager->free_all_chunks(list); }
const static size_t SmallSize = 32;
const static size_t Threshold = 24;
const static size_t LargeSize = 512;
// Return the chunk that contains the given nmethod, NULL otherwise.
// Scans the list of chunks backwards, as this method is used to add new
// entries, which are typically added in bulk for a single nmethod.
G1CodeRootChunk* find(nmethod* method);
void free(G1CodeRootChunk* chunk);
CodeRootSetTable* _table;
CodeRootSetTable* load_acquire_table();
size_t _length;
FreeList<G1CodeRootChunk> _list;
G1CodeRootChunkManager* _manager;
void move_to_large();
void allocate_small_table();
public:
// If an instance is initialized with a chunk manager of NULL, use the global
// default one.
G1CodeRootSet(G1CodeRootChunkManager* manager = NULL);
G1CodeRootSet() : _table(NULL), _length(0) {}
~G1CodeRootSet();
static void purge_chunks(size_t keep_ratio);
static void purge();
static size_t free_chunks_static_mem_size();
static size_t free_chunks_mem_size();
static size_t static_mem_size();
// Search for the code blob from the recently allocated ones to find duplicates more quickly, as this
// method is likely to be repeatedly called with the same nmethod.
void add(nmethod* method);
void remove_lock_free(nmethod* method);
nmethod* pop();
bool remove(nmethod* method);
// Safe to call without synchronization, but may return false negatives.
bool contains(nmethod* method);
void clear();
void nmethods_do(CodeBlobClosure* blk) const;
bool is_empty() { return length() == 0; }
// Remove all nmethods which no longer contain pointers into our "owner" region
void clean(HeapRegion* owner);
bool is_empty() {
bool empty = length() == 0;
assert(empty == (_table == NULL), "is empty only if table is deallocated");
return empty;
}
// Length in elements
size_t length() const { return _length; }
// Static data memory size in bytes of this set.
static size_t static_mem_size();
// Memory size in bytes taken by this set.
size_t mem_size();
static void test() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP
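The rewritten header above replaces the chunked free-list storage with a single lazily allocated table that is migrated from a small to a large table once it fills up. A very rough standalone model of that growth policy (plain C++ with std::unordered_set standing in for CodeRootSetTable; the SmallSize/Threshold/LargeSize constants come from the header above, but the growth logic is inferred from the method names and is only illustrative, since the real implementation lives in g1CodeCacheRemSet.cpp which is not part of this hunk):

#include <cstddef>
#include <unordered_set>

class CodeRootSetModel {
  static const size_t SmallSize = 32;
  static const size_t Threshold = 24;
  static const size_t LargeSize = 512;
  std::unordered_set<const void*>* _table;   // stands in for CodeRootSetTable
  size_t _capacity;
 public:
  CodeRootSetModel() : _table(nullptr), _capacity(0) {}
  ~CodeRootSetModel() { delete _table; }
  void add(const void* nm) {
    if (_table == nullptr) {                 // allocate_small_table()
      _table = new std::unordered_set<const void*>();
      _capacity = SmallSize;
    } else if (_capacity == SmallSize && _table->size() >= Threshold) {
      _capacity = LargeSize;                 // move_to_large()
    }
    _table->insert(nm);
  }
  bool contains(const void* nm) const { return _table != nullptr && _table->count(nm) != 0; }
  size_t length() const { return _table == nullptr ? 0 : _table->size(); }
  bool is_empty() const { return length() == 0; }
};

int main() {
  CodeRootSetModel set;
  int dummy[40];
  for (int i = 0; i < 40; i++) set.add(&dummy[i]);
  return (set.contains(&dummy[0]) && set.length() == 40) ? 0 : 1;
}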
......@@ -4580,6 +4580,56 @@ class G1KlassScanClosure : public KlassClosure {
}
};
class G1CodeBlobClosure : public CodeBlobClosure {
class HeapRegionGatheringOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
OopClosure* _work;
nmethod* _nm;
template <typename T>
void do_oop_work(T* p) {
_work->do_oop(p);
T oop_or_narrowoop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(oop_or_narrowoop)) {
oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
HeapRegion* hr = _g1h->heap_region_containing_raw(o);
assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset");
hr->add_strong_code_root(_nm);
}
}
public:
HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {}
void do_oop(oop* o) {
do_oop_work(o);
}
void do_oop(narrowOop* o) {
do_oop_work(o);
}
void set_nm(nmethod* nm) {
_nm = nm;
}
};
HeapRegionGatheringOopClosure _oc;
public:
G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
void do_code_blob(CodeBlob* cb) {
nmethod* nm = cb->as_nmethod_or_null();
if (nm != NULL) {
if (!nm->test_set_oops_do_mark()) {
_oc.set_nm(nm);
nm->oops_do(&_oc);
nm->fix_oop_relocations();
}
}
}
};
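The new G1CodeBlobClosure above wires an nmethod's oops_do traversal into the per-region strong-code-root bookkeeping: for every non-null oop the nmethod references, the nmethod is recorded as a strong code root of the region containing that oop. A very rough standalone model (plain C++, not the HotSpot types; region_index_for() is a made-up stand-in for heap_region_containing_raw(), and the collection-set assert is omitted):

#include <cstddef>
#include <cstdint>
#include <set>
#include <vector>

// One "region" worth of strong code roots.
struct RegionModel { std::set<const void*> strong_code_roots; };

static const uintptr_t kRegionBytes = 1u << 20;        // illustrative 1 MB regions

static std::size_t region_index_for(const void* obj, uintptr_t heap_base) {
  return ((uintptr_t)obj - heap_base) / kRegionBytes;  // stand-in for heap_region_containing_raw()
}

// Visit every oop an nmethod references and gather the nmethod as a strong
// code root of the region that contains the referenced object.
static void gather_code_roots(const void* nm,
                              const std::vector<const void*>& oops_in_nm,
                              std::vector<RegionModel>& regions,
                              uintptr_t heap_base) {
  for (const void* obj : oops_in_nm) {
    if (obj == nullptr) continue;                      // skip null oops
    regions[region_index_for(obj, heap_base)].strong_code_roots.insert(nm);
  }
}

int main() {
  std::vector<RegionModel> regions(4);
  uintptr_t heap_base = 0x100000;                      // pretend heap start
  const void* obj_in_r0 = (const void*)(heap_base + 0x10);
  const void* obj_in_r2 = (const void*)(heap_base + 2 * kRegionBytes + 0x40);
  int nm_dummy = 0;
  gather_code_roots(&nm_dummy, {obj_in_r0, obj_in_r2}, regions, heap_base);
  return (regions[0].strong_code_roots.count(&nm_dummy) &&
          regions[2].strong_code_roots.count(&nm_dummy)) ? 0 : 1;
}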
class G1ParTask : public AbstractGangTask {
protected:
G1CollectedHeap* _g1h;
......@@ -4648,22 +4698,6 @@ public:
}
};
class G1CodeBlobClosure: public CodeBlobClosure {
OopClosure* _f;
public:
G1CodeBlobClosure(OopClosure* f) : _f(f) {}
void do_code_blob(CodeBlob* blob) {
nmethod* that = blob->as_nmethod_or_null();
if (that != NULL) {
if (!that->test_set_oops_do_mark()) {
that->oops_do(_f);
that->fix_oop_relocations();
}
}
}
};
void work(uint worker_id) {
if (worker_id >= _n_workers) return; // no work needed this round
......@@ -4854,7 +4888,7 @@ g1_process_roots(OopClosure* scan_non_heap_roots,
g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms);
// Now scan the complement of the collection set.
MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations);
G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots);
g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i);
......@@ -5901,12 +5935,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
hot_card_cache->reset_hot_cache();
hot_card_cache->set_use_cache(true);
// Migrate the strong code roots attached to each region in
// the collection set. Ideally we would like to do this
// after we have finished the scanning/evacuation of the
// strong code roots for a particular heap region.
migrate_strong_code_roots();
purge_code_root_memory();
if (g1_policy()->during_initial_mark_pause()) {
......@@ -6902,13 +6930,8 @@ class RegisterNMethodOopClosure: public OopClosure {
" starting at "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
// HeapRegion::add_strong_code_root() avoids adding duplicate
// entries but having duplicates is OK since we "mark" nmethods
// as visited when we scan the strong code root lists during the GC.
hr->add_strong_code_root(_nm);
assert(hr->rem_set()->strong_code_roots_list_contains(_nm),
err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr)));
// HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries.
hr->add_strong_code_root_locked(_nm);
}
}
......@@ -6935,9 +6958,6 @@ class UnregisterNMethodOopClosure: public OopClosure {
_nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region())));
hr->remove_strong_code_root(_nm);
assert(!hr->rem_set()->strong_code_roots_list_contains(_nm),
err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT,
_nm, HR_FORMAT_PARAMS(hr)));
}
}
......@@ -6965,28 +6985,9 @@ void G1CollectedHeap::unregister_nmethod(nmethod* nm) {
nm->oops_do(&reg_cl, true);
}
class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure {
public:
bool doHeapRegion(HeapRegion *hr) {
assert(!hr->isHumongous(),
err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
HR_FORMAT_PARAMS(hr)));
hr->migrate_strong_code_roots();
return false;
}
};
void G1CollectedHeap::migrate_strong_code_roots() {
MigrateCodeRootsHeapRegionClosure cl;
double migrate_start = os::elapsedTime();
collection_set_iterate(&cl);
double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms);
}
void G1CollectedHeap::purge_code_root_memory() {
double purge_start = os::elapsedTime();
G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent);
G1CodeRootSet::purge();
double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0;
g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms);
}
......
/*
* Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
......@@ -1633,12 +1633,6 @@ public:
// Unregister the given nmethod from the G1 heap
virtual void unregister_nmethod(nmethod* nm);
// Migrate the nmethods in the code root lists of the regions
// in the collection set to regions in to-space. In the event
// of an evacuation failure, nmethods that reference objects
// that were not successfullly evacuated are not migrated.
void migrate_strong_code_roots();
// Free up superfluous code root memory.
void purge_code_root_memory();
......
......@@ -217,6 +217,8 @@ public:
_update_rset_cl->set_region(hr);
hr->object_iterate(&rspc);
hr->rem_set()->clean_strong_code_roots(hr);
hr->note_self_forwarding_removal_end(during_initial_mark,
during_conc_mark,
rspc.marked_bytes());
......
......@@ -274,9 +274,6 @@ double G1GCPhaseTimes::accounted_time_ms() {
// Now subtract the time taken to fix up roots in generated code
misc_time_ms += _cur_collection_code_root_fixup_time_ms;
// Strong code root migration time
misc_time_ms += _cur_strong_code_root_migration_time_ms;
// Strong code root purge time
misc_time_ms += _cur_strong_code_root_purge_time_ms;
......@@ -327,7 +324,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) {
_last_obj_copy_times_ms.print(1, "Object Copy (ms)");
}
print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms);
print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms);
print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms);
if (G1StringDedup::is_enabled()) {
print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads);
......
......@@ -129,7 +129,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
double _cur_collection_par_time_ms;
double _cur_collection_code_root_fixup_time_ms;
double _cur_strong_code_root_migration_time_ms;
double _cur_strong_code_root_purge_time_ms;
double _cur_evac_fail_recalc_used;
......@@ -233,10 +232,6 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
_cur_collection_code_root_fixup_time_ms = ms;
}
void record_strong_code_root_migration_time(double ms) {
_cur_strong_code_root_migration_time_ms = ms;
}
void record_strong_code_root_purge_time(double ms) {
_cur_strong_code_root_purge_time_ms = ms;
}
......
......@@ -109,7 +109,7 @@ class ScanRSClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
OopsInHeapRegionClosure* _oc;
CodeBlobToOopClosure* _code_root_cl;
CodeBlobClosure* _code_root_cl;
G1BlockOffsetSharedArray* _bot_shared;
G1SATBCardTableModRefBS *_ct_bs;
......@@ -121,7 +121,7 @@ class ScanRSClosure : public HeapRegionClosure {
public:
ScanRSClosure(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
CodeBlobClosure* code_root_cl,
uint worker_i) :
_oc(oc),
_code_root_cl(code_root_cl),
......@@ -241,7 +241,7 @@ public:
};
void G1RemSet::scanRS(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
CodeBlobClosure* code_root_cl,
uint worker_i) {
double rs_time_start = os::elapsedTime();
HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i);
......@@ -320,7 +320,7 @@ void G1RemSet::cleanupHRRS() {
}
void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
CodeBlobClosure* code_root_cl,
uint worker_i) {
#if CARD_REPEAT_HISTO
ct_freq_update_histo_and_reset();
......
......@@ -96,7 +96,7 @@ public:
// the "i" passed to the calling thread's work(i) function.
// In the sequential case this param will be ignored.
void oops_into_collection_set_do(OopsInHeapRegionClosure* blk,
CodeBlobToOopClosure* code_root_cl,
CodeBlobClosure* code_root_cl,
uint worker_i);
// Prepare for and cleanup after an oops_into_collection_set_do
......@@ -108,7 +108,7 @@ public:
void cleanup_after_oops_into_collection_set_do();
void scanRS(OopsInHeapRegionClosure* oc,
CodeBlobToOopClosure* code_root_cl,
CodeBlobClosure* code_root_cl,
uint worker_i);
void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i);
......
......@@ -253,6 +253,7 @@ public:
size_t occupied_cards = hrrs->occupied();
size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size();
if (code_root_mem_sz > max_code_root_mem_sz()) {
_max_code_root_mem_sz = code_root_mem_sz;
_max_code_root_mem_sz_region = r;
}
size_t code_root_elems = hrrs->strong_code_roots_list_length();
......
......@@ -285,10 +285,6 @@
product(uintx, G1MixedGCCountTarget, 8, \
"The target number of mixed GCs after a marking cycle.") \
\
experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \
"The amount of code root chunks that should be kept at most " \
"as percentage of already allocated.") \
\
experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true, \
"Try to reclaim dead large objects at every young GC.") \
\
......
......@@ -549,19 +549,15 @@ void HeapRegion::add_strong_code_root(nmethod* nm) {
hrrs->add_strong_code_root(nm);
}
void HeapRegion::remove_strong_code_root(nmethod* nm) {
void HeapRegion::add_strong_code_root_locked(nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
HeapRegionRemSet* hrrs = rem_set();
hrrs->remove_strong_code_root(nm);
hrrs->add_strong_code_root_locked(nm);
}
void HeapRegion::migrate_strong_code_roots() {
assert(in_collection_set(), "only collection set regions");
assert(!isHumongous(),
err_msg("humongous region "HR_FORMAT" should not have been added to collection set",
HR_FORMAT_PARAMS(this)));
void HeapRegion::remove_strong_code_root(nmethod* nm) {
HeapRegionRemSet* hrrs = rem_set();
hrrs->migrate_strong_code_roots();
hrrs->remove_strong_code_root(nm);
}
void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const {
......
......@@ -772,14 +772,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
// Routines for managing a list of code roots (attached to the
// this region's RSet) that point into this heap region.
void add_strong_code_root(nmethod* nm);
void add_strong_code_root_locked(nmethod* nm);
void remove_strong_code_root(nmethod* nm);
// During a collection, migrate the successfully evacuated
// strong code roots that referenced into this region to the
// new regions that they now point into. Unsuccessfully
// evacuated code roots are not migrated.
void migrate_strong_code_roots();
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list for this region
void strong_code_roots_do(CodeBlobClosure* blk) const;
......
......@@ -923,8 +923,24 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs,
}
// Code roots support
//
// The code root set is protected by two separate locking schemes
// When at safepoint the per-hrrs lock must be held during modifications
// except when doing a full gc.
// When not at safepoint the CodeCache_lock must be held during modifications.
// When concurrent readers access the contains() function
// (during the evacuation phase) no removals are allowed.
void HeapRegionRemSet::add_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
// Optimistic unlocked contains-check
if (!_code_roots.contains(nm)) {
MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag);
add_strong_code_root_locked(nm);
}
}
void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) {
assert(nm != NULL, "sanity");
_code_roots.add(nm);
}
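A standalone sketch of the locking discipline the new comment describes (plain C++, not the HotSpot code; std::unordered_set does not actually tolerate the unlocked read that the real G1CodeRootSet is designed for, so treat this purely as an illustration of the control flow):

#include <mutex>
#include <unordered_set>

class StrongCodeRootsModel {
  std::mutex _m;                               // stands in for the per-HRRS lock
  std::unordered_set<const void*> _roots;
 public:
  // In the real code contains() may run unlocked during evacuation; that is
  // safe only because no removals happen concurrently with readers.
  bool contains(const void* nm) const { return _roots.count(nm) != 0; }

  void add(const void* nm) {
    if (!contains(nm)) {                       // optimistic unlocked check
      std::lock_guard<std::mutex> guard(_m);
      _roots.insert(nm);                       // set semantics drop a racy duplicate
    }
  }

  void remove(const void* nm) {
    std::lock_guard<std::mutex> guard(_m);     // callers hold CodeCache_lock or are at a safepoint
    _roots.erase(nm);
  }
};

int main() {
  StrongCodeRootsModel set;
  int nm1 = 0, nm2 = 0;
  set.add(&nm1);
  set.add(&nm1);                               // duplicate add is a no-op
  set.add(&nm2);
  set.remove(&nm2);
  return (set.contains(&nm1) && !set.contains(&nm2)) ? 0 : 1;
}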
......@@ -933,98 +949,21 @@ void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) {
assert(nm != NULL, "sanity");
assert_locked_or_safepoint(CodeCache_lock);
_code_roots.remove_lock_free(nm);
MutexLockerEx ml(CodeCache_lock->owned_by_self() ? NULL : &_m, Mutex::_no_safepoint_check_flag);
_code_roots.remove(nm);
// Check that there were no duplicates
guarantee(!_code_roots.contains(nm), "duplicate entry found");
}
class NMethodMigrationOopClosure : public OopClosure {
G1CollectedHeap* _g1h;
HeapRegion* _from;
nmethod* _nm;
uint _num_self_forwarded;
template <class T> void do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (_from->is_in(obj)) {
// Reference still points into the source region.
// Since roots are immediately evacuated this means that
// we must have self forwarded the object
assert(obj->is_forwarded(),
err_msg("code roots should be immediately evacuated. "
"Ref: "PTR_FORMAT", "
"Obj: "PTR_FORMAT", "
"Region: "HR_FORMAT,
p, (void*) obj, HR_FORMAT_PARAMS(_from)));
assert(obj->forwardee() == obj,
err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj));
// The object has been self forwarded.
// Note, if we're during an initial mark pause, there is
// no need to explicitly mark object. It will be marked
// during the regular evacuation failure handling code.
_num_self_forwarded++;
} else {
// The reference points into a promotion or to-space region
HeapRegion* to = _g1h->heap_region_containing(obj);
to->rem_set()->add_strong_code_root(_nm);
}
}
}
public:
NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm):
_g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {}
void do_oop(narrowOop* p) { do_oop_work(p); }
void do_oop(oop* p) { do_oop_work(p); }
uint retain() { return _num_self_forwarded > 0; }
};
void HeapRegionRemSet::migrate_strong_code_roots() {
assert(hr()->in_collection_set(), "only collection set regions");
assert(!hr()->isHumongous(),
err_msg("humongous region "HR_FORMAT" should not have been added to the collection set",
HR_FORMAT_PARAMS(hr())));
ResourceMark rm;
// List of code blobs to retain for this region
GrowableArray<nmethod*> to_be_retained(10);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
while (!_code_roots.is_empty()) {
nmethod *nm = _code_roots.pop();
if (nm != NULL) {
NMethodMigrationOopClosure oop_cl(g1h, hr(), nm);
nm->oops_do(&oop_cl);
if (oop_cl.retain()) {
to_be_retained.push(nm);
}
}
}
// Now push any code roots we need to retain
assert(to_be_retained.is_empty() || hr()->evacuation_failed(),
"Retained nmethod list must be empty or "
"evacuation of this region failed");
while (to_be_retained.is_nonempty()) {
nmethod* nm = to_be_retained.pop();
assert(nm != NULL, "sanity");
add_strong_code_root(nm);
}
}
void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const {
_code_roots.nmethods_do(blk);
}
void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) {
_code_roots.clean(hr);
}
size_t HeapRegionRemSet::strong_code_roots_mem_size() {
return _code_roots.mem_size();
}
......
......@@ -353,13 +353,13 @@ public:
// Returns the memory occupancy of all static data structures associated
// with remembered sets.
static size_t static_mem_size() {
return OtherRegionsTable::static_mem_size() + G1CodeRootSet::free_chunks_static_mem_size();
return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size();
}
// Returns the memory occupancy of all free_list data structures associated
// with remembered sets.
static size_t fl_mem_size() {
return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::free_chunks_mem_size();
return OtherRegionsTable::fl_mem_size();
}
bool contains_reference(OopOrNarrowOopStar from) const {
......@@ -369,18 +369,15 @@ public:
// Routines for managing the list of code roots that point into
// the heap region that owns this RSet.
void add_strong_code_root(nmethod* nm);
void add_strong_code_root_locked(nmethod* nm);
void remove_strong_code_root(nmethod* nm);
// During a collection, migrate the successfully evacuated strong
// code roots that referenced into the region that owns this RSet
// to the RSets of the new regions that they now point into.
// Unsuccessfully evacuated code roots are not migrated.
void migrate_strong_code_roots();
// Applies blk->do_code_blob() to each of the entries in
// the strong code roots list
void strong_code_roots_do(CodeBlobClosure* blk) const;
void clean_strong_code_roots(HeapRegion* hr);
// Returns the number of elements in the strong code roots list
size_t strong_code_roots_list_length() const {
return _code_roots.length();
......
......@@ -34,7 +34,6 @@
#if INCLUDE_ALL_GCS
#include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
#include "gc_implementation/g1/g1CodeCacheRemSet.hpp"
#endif // INCLUDE_ALL_GCS
// Free list. A FreeList is used to access a linked list of chunks
......@@ -333,5 +332,4 @@ template class FreeList<Metablock>;
template class FreeList<Metachunk>;
#if INCLUDE_ALL_GCS
template class FreeList<FreeChunk>;
template class FreeList<G1CodeRootChunk>;
#endif // INCLUDE_ALL_GCS
......@@ -93,7 +93,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) {
set_hidden(false);
set_dont_inline(false);
set_method_data(NULL);
set_method_counters(NULL);
clear_method_counters();
set_vtable_index(Method::garbage_vtable_index);
// Fix and bury in Method*
......@@ -117,7 +117,7 @@ void Method::deallocate_contents(ClassLoaderData* loader_data) {
MetadataFactory::free_metadata(loader_data, method_data());
set_method_data(NULL);
MetadataFactory::free_metadata(loader_data, method_counters());
set_method_counters(NULL);
clear_method_counters();
// The nmethod will be gone when we get here.
if (code() != NULL) _code = NULL;
}
......@@ -388,9 +388,7 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
methodHandle mh(m);
ClassLoaderData* loader_data = mh->method_holder()->class_loader_data();
MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL);
if (mh->method_counters() == NULL) {
mh->set_method_counters(counters);
} else {
if (!mh->init_method_counters(counters)) {
MetadataFactory::free_metadata(loader_data, counters);
}
return mh->method_counters();
......@@ -852,7 +850,7 @@ void Method::unlink_method() {
assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?");
set_method_data(NULL);
set_method_counters(NULL);
clear_method_counters();
}
// Called when the method_holder is getting linked. Setup entrypoints so the method
......
......@@ -365,11 +365,13 @@ class Method : public Metadata {
return _method_counters;
}
void set_method_counters(MethodCounters* counters) {
// The store into method must be released. On platforms without
// total store order (TSO) the reference may become visible before
// the initialization of data otherwise.
OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters);
void clear_method_counters() {
_method_counters = NULL;
}
bool init_method_counters(MethodCounters* counters) {
// Try to install a pointer to MethodCounters, return true on success.
return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL;
}
#ifdef TIERED
......
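init_method_counters() above replaces the plain release-store with a compare-and-swap so that concurrent callers of build_method_counters() install the counters exactly once, with losers freeing their copy (as the earlier method.cpp hunk shows). A standalone sketch of that install-once pattern in portable C++11 (std::atomic standing in for HotSpot's Atomic::cmpxchg_ptr; the names are made up for illustration):

#include <atomic>

struct MethodCountersModel { int invocation_count = 0; };

static std::atomic<MethodCountersModel*> g_counters{nullptr};

MethodCountersModel* get_or_create_counters() {
  MethodCountersModel* current = g_counters.load(std::memory_order_acquire);
  if (current != nullptr) return current;
  MethodCountersModel* fresh = new MethodCountersModel();
  MethodCountersModel* expected = nullptr;
  if (g_counters.compare_exchange_strong(expected, fresh,
                                         std::memory_order_release,
                                         std::memory_order_acquire)) {
    return fresh;                      // we installed our copy
  }
  delete fresh;                        // somebody else won the race
  return expected;                     // compare_exchange stored the winner here
}

int main() {
  return get_or_create_counters() == get_or_create_counters() ? 0 : 1;
}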
......@@ -36,21 +36,22 @@
#include "utilities/numberSeq.hpp"
// This is a generic hashtable, designed to be used for the symbol
// and string tables.
//
// It is implemented as an open hash table with a fixed number of buckets.
//
// %note:
// - HashtableEntrys are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
BasicHashtableEntry<F>* entry;
// This hashtable is implemented as an open hash table with a fixed number of buckets.
if (_free_list) {
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry_free_list() {
BasicHashtableEntry<F>* entry = NULL;
if (_free_list != NULL) {
entry = _free_list;
_free_list = _free_list->next();
} else {
}
return entry;
}
// HashtableEntrys are allocated in blocks to reduce the space overhead.
template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsigned int hashValue) {
BasicHashtableEntry<F>* entry = new_entry_free_list();
if (entry == NULL) {
if (_first_free_entry + _entry_size >= _end_block) {
int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries));
int len = _entry_size * block_size;
......@@ -83,9 +84,9 @@ template <class T, MEMFLAGS F> HashtableEntry<T, F>* Hashtable<T, F>::new_entry(
// This is somewhat an arbitrary heuristic but if one bucket gets to
// rehash_count which is currently 100, there's probably something wrong.
template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
assert(table_size() != 0, "underflow");
if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) {
template <class T, MEMFLAGS F> bool RehashableHashtable<T, F>::check_rehash_table(int count) {
assert(this->table_size() != 0, "underflow");
if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) {
// Set a flag for the next safepoint, which should be at some guaranteed
// safepoint interval.
return true;
......@@ -93,13 +94,13 @@ template <MEMFLAGS F> bool BasicHashtable<F>::check_rehash_table(int count) {
return false;
}
template <class T, MEMFLAGS F> juint Hashtable<T, F>::_seed = 0;
template <class T, MEMFLAGS F> juint RehashableHashtable<T, F>::_seed = 0;
// Create a new table and using alternate hash code, populate the new table
// with the existing elements. This can be used to change the hash code
// and could in the future change the size of the table.
template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* new_table) {
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::move_to(RehashableHashtable<T, F>* new_table) {
// Initialize the global seed for hashing.
_seed = AltHashing::compute_seed();
......@@ -109,7 +110,7 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::move_to(Hashtable<T, F>* ne
// Iterate through the table and create a new entry for the new table
for (int i = 0; i < new_table->table_size(); ++i) {
for (HashtableEntry<T, F>* p = bucket(i); p != NULL; ) {
for (HashtableEntry<T, F>* p = this->bucket(i); p != NULL; ) {
HashtableEntry<T, F>* next = p->next();
T string = p->literal();
// Use alternate hashing algorithm on the symbol in the first table
......@@ -238,11 +239,11 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::reverse(void* boundary) {
}
}
template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(Symbol *symbol) {
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(Symbol *symbol) {
return symbol->size() * HeapWordSize;
}
template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
template <class T, MEMFLAGS F> int RehashableHashtable<T, F>::literal_size(oop oop) {
// NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true,
// and the String.value array is shared by several Strings. However, starting from JDK8,
// the String.value array is not shared anymore.
......@@ -255,12 +256,12 @@ template <class T, MEMFLAGS F> int Hashtable<T, F>::literal_size(oop oop) {
// Note: if you create a new subclass of Hashtable<MyNewType, F>, you will need to
// add a new function Hashtable<T, F>::literal_size(MyNewType lit)
template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
template <class T, MEMFLAGS F> void RehashableHashtable<T, F>::dump_table(outputStream* st, const char *table_name) {
NumberSeq summary;
int literal_bytes = 0;
for (int i = 0; i < this->table_size(); ++i) {
int count = 0;
for (HashtableEntry<T, F>* e = bucket(i);
for (HashtableEntry<T, F>* e = this->bucket(i);
e != NULL; e = e->next()) {
count++;
literal_bytes += literal_size(e->literal());
......@@ -270,7 +271,7 @@ template <class T, MEMFLAGS F> void Hashtable<T, F>::dump_table(outputStream* st
double num_buckets = summary.num();
double num_entries = summary.sum();
int bucket_bytes = (int)num_buckets * sizeof(bucket(0));
int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket<F>);
int entry_bytes = (int)num_entries * sizeof(HashtableEntry<T, F>);
int total_bytes = literal_bytes + bucket_bytes + entry_bytes;
......@@ -352,12 +353,20 @@ template <MEMFLAGS F> void BasicHashtable<F>::verify_lookup_length(double load)
#endif
// Explicitly instantiate these types
#if INCLUDE_ALL_GCS
template class Hashtable<nmethod*, mtGC>;
template class HashtableEntry<nmethod*, mtGC>;
template class BasicHashtable<mtGC>;
#endif
template class Hashtable<ConstantPool*, mtClass>;
template class RehashableHashtable<Symbol*, mtSymbol>;
template class RehashableHashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtSymbol>;
template class Hashtable<Klass*, mtClass>;
template class Hashtable<oop, mtClass>;
#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
template class Hashtable<oop, mtSymbol>;
template class RehashableHashtable<oop, mtSymbol>;
#endif // SOLARIS || CHECK_UNHANDLED_OOPS
template class Hashtable<oopDesc*, mtSymbol>;
template class Hashtable<Symbol*, mtClass>;
......
......@@ -178,11 +178,6 @@ protected:
void verify_lookup_length(double load);
#endif
enum {
rehash_count = 100,
rehash_multiple = 60
};
void initialize(int table_size, int entry_size, int number_of_entries);
// Accessor
......@@ -194,12 +189,12 @@ protected:
// The following method is not MT-safe and must be done under lock.
BasicHashtableEntry<F>** bucket_addr(int i) { return _buckets[i].entry_addr(); }
// Attempt to get an entry from the free list
BasicHashtableEntry<F>* new_entry_free_list();
// Table entry management
BasicHashtableEntry<F>* new_entry(unsigned int hashValue);
// Check that the table is unbalanced
bool check_rehash_table(int count);
// Used when moving the entry to another table
// Clean up links, but do not add to free_list
void unlink_entry(BasicHashtableEntry<F>* entry) {
......@@ -277,8 +272,30 @@ protected:
return (HashtableEntry<T, F>**)BasicHashtable<F>::bucket_addr(i);
}
};
template <class T, MEMFLAGS F> class RehashableHashtable : public Hashtable<T, F> {
protected:
enum {
rehash_count = 100,
rehash_multiple = 60
};
// Check that the table is unbalanced
bool check_rehash_table(int count);
public:
RehashableHashtable(int table_size, int entry_size)
: Hashtable<T, F>(table_size, entry_size) { }
RehashableHashtable(int table_size, int entry_size,
HashtableBucket<F>* buckets, int number_of_entries)
: Hashtable<T, F>(table_size, entry_size, buckets, number_of_entries) { }
// Function to move these elements into the new table.
void move_to(Hashtable<T, F>* new_table);
void move_to(RehashableHashtable<T, F>* new_table);
static bool use_alternate_hashcode() { return _seed != 0; }
static juint seed() { return _seed; }
......@@ -292,7 +309,6 @@ protected:
static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;}
static int literal_size(Klass *k) {Unimplemented(); return 0;}
public:
void dump_table(outputStream* st, const char *table_name);
private:
......
......@@ -180,8 +180,8 @@ ifdef TESTDIRS
JTREG_TESTDIRS = $(TESTDIRS)
endif
# Default JTREG to run (win32 script works for everybody)
JTREG = $(JT_HOME)/win32/bin/jtreg
# Default JTREG to run
JTREG = $(JT_HOME)/bin/jtreg
# Option to tell jtreg to not run tests marked with "ignore"
ifeq ($(PLATFORM), windows)
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8058744
* @summary Invalid pattern-matching of address computations in raw unsafe
* @library /testlibrary
* @run main/othervm -Xbatch UnsafeRaw
*/
import com.oracle.java.testlibrary.Utils;
import java.util.Random;
public class UnsafeRaw {
public static class Tests {
public static int int_index(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
return unsafe.getInt(base + (index << 2));
}
public static int long_index(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
return unsafe.getInt(base + (index << 2));
}
public static int int_index_back_ashift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
return unsafe.getInt(base + (index >> 2));
}
public static int int_index_back_lshift(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
return unsafe.getInt(base + (index >>> 2));
}
public static int long_index_back_ashift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
return unsafe.getInt(base + (index >> 2));
}
public static int long_index_back_lshift(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
return unsafe.getInt(base + (index >>> 2));
}
public static int int_const_12345678_index(sun.misc.Unsafe unsafe, long base) throws Exception {
int idx4 = 0x12345678;
return unsafe.getInt(base + idx4);
}
public static int long_const_1234567890abcdef_index(sun.misc.Unsafe unsafe, long base) throws Exception {
long idx5 = 0x1234567890abcdefL;
return unsafe.getInt(base + idx5);
}
public static int int_index_mul(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
return unsafe.getInt(base + (index * 4));
}
public static int long_index_mul(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
return unsafe.getInt(base + (index * 4));
}
public static int int_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, int index) throws Exception {
return unsafe.getInt(base + (index * 16));
}
public static int long_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, long index) throws Exception {
return unsafe.getInt(base + (index * 16));
}
}
public static void main(String[] args) throws Exception {
sun.misc.Unsafe unsafe = Utils.getUnsafe();
final int array_size = 128;
final int element_size = 4;
final int magic = 0x12345678;
Random rnd = new Random();
long array = unsafe.allocateMemory(array_size * element_size); // 128 ints
long addr = array + array_size * element_size / 2; // something in the middle to work with
unsafe.putInt(addr, magic);
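// Each block below picks a compensating base (addr_k = addr - f(idx)) so that the
// tested method computes addr_k + f(idx) == addr and must read back 'magic'; a
// miscompiled address computation would dereference a different location and fail.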
for (int j = 0; j < 100000; j++) {
if (Tests.int_index(unsafe, addr, 0) != magic) throw new Exception();
if (Tests.long_index(unsafe, addr, 0) != magic) throw new Exception();
if (Tests.int_index_mul(unsafe, addr, 0) != magic) throw new Exception();
if (Tests.long_index_mul(unsafe, addr, 0) != magic) throw new Exception();
{
long idx1 = rnd.nextLong();
long addr1 = addr - (idx1 << 2);
if (Tests.long_index(unsafe, addr1, idx1) != magic) throw new Exception();
}
{
long idx2 = rnd.nextLong();
long addr2 = addr - (idx2 >> 2);
if (Tests.long_index_back_ashift(unsafe, addr2, idx2) != magic) throw new Exception();
}
{
long idx3 = rnd.nextLong();
long addr3 = addr - (idx3 >>> 2);
if (Tests.long_index_back_lshift(unsafe, addr3, idx3) != magic) throw new Exception();
}
{
long idx4 = 0x12345678;
long addr4 = addr - idx4;
if (Tests.int_const_12345678_index(unsafe, addr4) != magic) throw new Exception();
}
{
long idx5 = 0x1234567890abcdefL;
long addr5 = addr - idx5;
if (Tests.long_const_1234567890abcdef_index(unsafe, addr5) != magic) throw new Exception();
}
{
int idx6 = rnd.nextInt();
long addr6 = addr - (idx6 >> 2);
if (Tests.int_index_back_ashift(unsafe, addr6, idx6) != magic) throw new Exception();
}
{
int idx7 = rnd.nextInt();
long addr7 = addr - (idx7 >>> 2);
if (Tests.int_index_back_lshift(unsafe, addr7, idx7) != magic) throw new Exception();
}
{
int idx8 = rnd.nextInt();
long addr8 = addr - (idx8 * 16);
if (Tests.int_index_mul_scale_16(unsafe, addr8, idx8) != magic) throw new Exception();
}
{
long idx9 = rnd.nextLong();
long addr9 = addr - (idx9 * 16);
if (Tests.long_index_mul_scale_16(unsafe, addr9, idx9) != magic) throw new Exception();
}
}
}
}
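Utils.getUnsafe() above comes from the shared testlibrary. For running similar experiments outside of jtreg, here is a minimal, self-contained sketch of the usual JDK 8 idiom for obtaining the sun.misc.Unsafe instance (the class name UnsafeAccessSketch and the reflective access to the private "theUnsafe" field are illustrative, not part of the test):
import java.lang.reflect.Field;

public class UnsafeAccessSketch {
    // Hypothetical stand-in for the testlibrary's Utils.getUnsafe():
    // read the private static "theUnsafe" field reflectively.
    static sun.misc.Unsafe getUnsafe() throws Exception {
        Field f = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
        f.setAccessible(true);
        return (sun.misc.Unsafe) f.get(null);
    }

    public static void main(String[] args) throws Exception {
        sun.misc.Unsafe unsafe = getUnsafe();
        long p = unsafe.allocateMemory(4);                          // room for one int
        unsafe.putInt(p, 0x12345678);
        System.out.println(Integer.toHexString(unsafe.getInt(p)));  // prints 12345678
        unsafe.freeMemory(p);
    }
}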
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test ArchiveDoesNotExist
* @summary Test how VM handles "file does not exist" situation while
* attempting to use CDS archive. JVM should exit gracefully
* when sharing mode is ON, and continue w/o sharing if sharing
* mode is AUTO.
* @library /testlibrary
* @run main ArchiveDoesNotExist
*/
import com.oracle.java.testlibrary.*;
import java.io.File;
public class ArchiveDoesNotExist {
public static void main(String[] args) throws Exception {
String fileName = "test.jsa";
File cdsFile = new File(fileName);
if (cdsFile.exists())
throw new RuntimeException("Test error: cds file already exists");
// Sharing: on
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
"-Xshare:on",
"-version");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("Specified shared archive not found");
output.shouldHaveExitValue(1);
// Sharing: auto
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
"-Xshare:auto",
"-version");
output = new OutputAnalyzer(pb.start());
output.shouldMatch("(java|openjdk) version");
output.shouldNotContain("sharing");
output.shouldHaveExitValue(0);
}
}
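The two scenarios above can also be reproduced by hand without the testlibrary. A minimal sketch using plain ProcessBuilder (it assumes a java launcher on the PATH and reuses the flags exercised by the test; expect a non-zero exit for -Xshare:on and exit 0 for -Xshare:auto):
import java.util.Arrays;

public class XShareProbe {
    public static void main(String[] args) throws Exception {
        // Point the VM at an archive file that does not exist.
        String archive = "does-not-exist.jsa";
        for (String mode : new String[] { "on", "auto" }) {
            Process p = new ProcessBuilder(Arrays.asList(
                    "java",
                    "-XX:+UnlockDiagnosticVMOptions",
                    "-XX:SharedArchiveFile=./" + archive,
                    "-Xshare:" + mode,
                    "-version"))
                .inheritIO()
                .start();
            System.out.println("-Xshare:" + mode + " exited with " + p.waitFor());
        }
    }
}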
/*
* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -29,6 +29,7 @@
* is different from object alignment for creating a CDS file
* should fail when loading.
* @library /testlibrary
* @bug 8025642
*/
import com.oracle.java.testlibrary.*;
......@@ -82,7 +83,11 @@ public class CdsDifferentObjectAlignment {
createAlignment,
loadAlignment);
output.shouldContain(expectedErrorMsg);
try {
output.shouldContain(expectedErrorMsg);
} catch (RuntimeException e) {
output.shouldContain("Unable to use shared archive");
}
output.shouldHaveExitValue(1);
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test DefaultUseWithClient
* @summary Test default behavior of sharing with -client
* @library /testlibrary
* @run main DefaultUseWithClient
* @bug 8032224
*/
import com.oracle.java.testlibrary.*;
import java.io.File;
public class DefaultUseWithClient {
public static void main(String[] args) throws Exception {
String fileName = "test.jsa";
// On 32-bit windows CDS should be on by default in "-client" config
// Skip this test on any other platform
boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit());
if (!is32BitWindows) {
System.out.println("Test only applicable on 32-bit Windows. Skipping");
return;
}
// create the archive
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
"-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldHaveExitValue(0);
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
"-client",
"-XX:+PrintSharedSpaces",
"-version");
output = new OutputAnalyzer(pb.start());
try {
output.shouldContain("sharing");
} catch (RuntimeException e) {
// if sharing failed due to ASLR or similar reasons,
// check whether sharing was attempted at all (UseSharedSpaces)
output.shouldContain("UseSharedSpaces:");
}
output.shouldHaveExitValue(0);
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/* @test LimitSharedSizes
* @summary Test handling of limits on shared space size
* @library /testlibrary
* @run main LimitSharedSizes
*/
import com.oracle.java.testlibrary.*;
public class LimitSharedSizes {
private static class SharedSizeTestData {
public String optionName;
public String optionValue;
public String expectedErrorMsg;
public SharedSizeTestData(String name, String value, String msg) {
optionName = name;
optionValue = value;
expectedErrorMsg = msg;
}
}
private static final SharedSizeTestData[] testTable = {
// values in this part of the test table should cause failure
// (shared space sizes are deliberately too small)
new SharedSizeTestData("-XX:SharedReadOnlySize", "4M", "read only"),
new SharedSizeTestData("-XX:SharedReadWriteSize","4M", "read write"),
// Known issue, JDK-8038422 (assert() on Windows)
// new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"),
// This will cause a VM crash; commenting out for now; see bug JDK-8038268
// @ignore JDK-8038268
// new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"),
// these values are larger than default ones, but should
// be acceptable and not cause failure
new SharedSizeTestData("-XX:SharedReadOnlySize", "20M", null),
new SharedSizeTestData("-XX:SharedReadWriteSize", "20M", null),
new SharedSizeTestData("-XX:SharedMiscDataSize", "20M", null),
new SharedSizeTestData("-XX:SharedMiscCodeSize", "20M", null)
};
public static void main(String[] args) throws Exception {
String fileName = "test.jsa";
for (SharedSizeTestData td : testTable) {
String option = td.optionName + "=" + td.optionValue;
System.out.println("testing option <" + option + ">");
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./" + fileName,
option,
"-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
if (td.expectedErrorMsg != null) {
output.shouldContain("The shared " + td.expectedErrorMsg
+ " space is not large enough");
output.shouldHaveExitValue(2);
} else {
output.shouldNotContain("space is not large enough");
output.shouldHaveExitValue(0);
}
}
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test SharedBaseAddress
* @summary Test variety of values for SharedBaseAddress, making sure
* VM handles normal values as well as edge values w/o a crash.
* @library /testlibrary
* @run main SharedBaseAddress
*/
import com.oracle.java.testlibrary.*;
public class SharedBaseAddress {
// shared base address test table
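// Values range from plausible bases to deliberately odd ones ("0", "1", "64k");
// per the test summary the VM is expected to handle both without crashing.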
private static final String[] testTable = {
"1g", "8g", "64g","512g", "4t",
"32t", "128t", "0",
"1", "64k", "64M"
};
public static void main(String[] args) throws Exception {
// Known issue on Solaris-Sparc
// @ignore JDK-8044600
if (Platform.isSolaris() && Platform.isSparc())
return;
for (String testEntry : testTable) {
System.out.println("sharedBaseAddress = " + testEntry);
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=test.jsa",
"-XX:SharedBaseAddress=" + testEntry,
"-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
output.shouldContain("Loading classes to share");
try {
pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=test.jsa",
"-Xshare:on",
"-version");
output = new OutputAnalyzer(pb.start());
output.shouldContain("sharing");
output.shouldHaveExitValue(0);
} catch (RuntimeException e) {
output.shouldContain("Unable to use shared archive");
output.shouldHaveExitValue(1);
}
}
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test SpaceUtilizationCheck
* @summary Check if the space utilization for shared spaces is adequate
* @library /testlibrary
* @run main SpaceUtilizationCheck
*/
import com.oracle.java.testlibrary.*;
import java.util.regex.Pattern;
import java.util.regex.Matcher;
import java.util.ArrayList;
import java.lang.Integer;
public class SpaceUtilizationCheck {
// Minimum allowed utilization value (percent)
// The goal is to have this number be 50% for the RO and RW regions
// Once that feature is implemented, increase the MIN_UTILIZATION to 50
private static final int MIN_UTILIZATION = 30;
// Only RO and RW regions are considered for this check, since they
// currently account for the bulk of the shared space
private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2;
public static void main(String[] args) throws Exception {
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
"-XX:+UnlockDiagnosticVMOptions",
"-XX:SharedArchiveFile=./test.jsa",
"-Xshare:dump");
OutputAnalyzer output = new OutputAnalyzer(pb.start());
String stdout = output.getStdout();
ArrayList<String> utilization = findUtilization(stdout);
if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS )
throw new RuntimeException("The output format of sharing summary has changed");
for(String str : utilization) {
int value = Integer.parseInt(str);
if (value < MIN_UTILIZATION) {
System.out.println(stdout);
throw new RuntimeException("Utilization for one of the regions" +
"is below a threshold of " + MIN_UTILIZATION + "%");
}
}
}
public static ArrayList<String> findUtilization(String input) {
ArrayList<String> regions = filterRegionsOfInterest(input.split("\n"));
return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+");
}
private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) {
ArrayList<String> result = new ArrayList<String>();
for (String str : input) {
Matcher matcher = Pattern.compile(pattern).matcher(str);
if (matcher.find()) {
result.add(matcher.group());
}
}
return result;
}
private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) {
ArrayList<String> result = new ArrayList<String>();
for (String str : inputLines) {
if (str.contains("ro space:") || str.contains("rw space:")) {
result.add(str);
}
}
return result;
}
}
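For reference, the two-stage regex filtering above reduces each "ro space:"/"rw space:" line to the integer part of its "% used" figure. A standalone sketch against a made-up dump line (the line shape is illustrative; the real -Xshare:dump output is not reproduced here) shows the same extraction:
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class UtilizationParseSketch {
    public static void main(String[] args) {
        // Illustrative sample only, shaped like a region summary line.
        String line = "ro space: 4526912 [ 38.3% of total] out of 10485760 bytes [ 43.2% used] at 0x0000000000000000";
        Matcher used = Pattern.compile("bytes \\[.*% used\\]").matcher(line);
        if (used.find()) {
            Matcher digits = Pattern.compile("\\d+").matcher(used.group());
            if (digits.find()) {
                System.out.println(Integer.parseInt(digits.group()));  // prints 43
            }
        }
    }
}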