diff --git a/make/hotspot_version b/make/hotspot_version index 97150e231161f73fdc7304d2c8f8f207cd7b5bac..28a3cf249fb8526148f05318abaf4933b0945a16 100644 --- a/make/hotspot_version +++ b/make/hotspot_version @@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014 HS_MAJOR_VER=25 HS_MINOR_VER=40 -HS_BUILD_NUMBER=11 +HS_BUILD_NUMBER=12 JDK_MAJOR_VER=1 JDK_MINOR_VER=8 diff --git a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp index b3c706dbef4a5c2defb7224a3062e592d85cd697..bc67a72c1fd40b6405d513f4c8bf09fdb3f8c34d 100644 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp @@ -1128,51 +1128,82 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, // Hoist any int/ptr/long's in the first 6 to int regs. // Hoist any flt/dbl's in the first 16 dbl regs. int j = 0; // Count of actual args, not HALVES - for( int i=0; ias_VMReg()); - } else { - // V9ism: floats go in ODD stack slot - regs[i].set1(VMRegImpl::stack2reg(1 + (j<<1))); + // Per SPARC Compliance Definition 2.4.1, page 3P-12 available here + // http://www.sparc.org/wp-content/uploads/2014/01/SCD.2.4.1.pdf.gz + // + // "When a callee prototype exists, and does not indicate variable arguments, + // floating-point values assigned to locations %sp+BIAS+128 through %sp+BIAS+248 + // will be promoted to floating-point registers" + // + // By "promoted" it means that the argument is located in two places, an unused + // spill slot in the "parameter array" (starts at %sp+BIAS+128), and a live + // float register. In most cases, there are 6 or fewer arguments of any type, + // and the standard parameter array slots (%sp+BIAS+128 to %sp+BIAS+176 exclusive) + // serve as shadow slots. Per the spec floating point registers %d6 to %d16 + // require slots beyond that (up to %sp+BIAS+248). + // + { + // V9ism: floats go in ODD registers and stack slots + int float_index = 1 + (j << 1); + param_array_reg.set1(VMRegImpl::stack2reg(float_index)); + if (j < 16) { + regs[i].set1(as_FloatRegister(float_index)->as_VMReg()); + } else { + regs[i] = param_array_reg; + } } break; case T_DOUBLE: - assert( sig_bt[i+1] == T_VOID, "expecting half" ); - if ( j < 16 ) { - // V9ism: doubles go in EVEN/ODD regs - regs[i].set2(as_FloatRegister(j<<1)->as_VMReg()); - } else { - // V9ism: doubles go in EVEN/ODD stack slots - regs[i].set2(VMRegImpl::stack2reg(j<<1)); + { + assert(sig_bt[i + 1] == T_VOID, "expecting half"); + // V9ism: doubles go in EVEN/ODD regs and stack slots + int double_index = (j << 1); + param_array_reg.set2(VMRegImpl::stack2reg(double_index)); + if (j < 16) { + regs[i].set2(as_FloatRegister(double_index)->as_VMReg()); + } else { + // V9ism: doubles go in EVEN/ODD stack slots + regs[i] = param_array_reg; + } } break; - case T_VOID: regs[i].set_bad(); j--; break; // Do not count HALVES + case T_VOID: + regs[i].set_bad(); + j--; + break; // Do not count HALVES default: ShouldNotReachHere(); } - if (regs[i].first()->is_stack()) { - int off = regs[i].first()->reg2stack(); + // Keep track of the deepest parameter array slot. 
+ if (!param_array_reg.first()->is_valid()) { + param_array_reg = regs[i]; + } + if (param_array_reg.first()->is_stack()) { + int off = param_array_reg.first()->reg2stack(); if (off > max_stack_slots) max_stack_slots = off; } - if (regs[i].second()->is_stack()) { - int off = regs[i].second()->reg2stack(); + if (param_array_reg.second()->is_stack()) { + int off = param_array_reg.second()->reg2stack(); if (off > max_stack_slots) max_stack_slots = off; } } @@ -1180,8 +1211,8 @@ int SharedRuntime::c_calling_convention(const BasicType *sig_bt, #else // _LP64 // V8 convention: first 6 things in O-regs, rest on stack. // Alignment is willy-nilly. - for( int i=0; iis_stack()) { - int off = regs[i].first()->reg2stack(); + int off = regs[i].first()->reg2stack(); if (off > max_stack_slots) max_stack_slots = off; } if (regs[i].second()->is_stack()) { - int off = regs[i].second()->reg2stack(); + int off = regs[i].second()->reg2stack(); if (off > max_stack_slots) max_stack_slots = off; } } @@ -1357,11 +1388,10 @@ static void object_move(MacroAssembler* masm, const Register rOop = src.first()->as_Register(); const Register rHandle = L5; int oop_slot = rOop->input_number() * VMRegImpl::slots_per_word + oop_handle_offset; - int offset = oop_slot*VMRegImpl::stack_slot_size; - Label skip; + int offset = oop_slot * VMRegImpl::stack_slot_size; __ st_ptr(rOop, SP, offset + STACK_BIAS); if (is_receiver) { - *receiver_offset = oop_slot * VMRegImpl::stack_slot_size; + *receiver_offset = offset; } map->set_oop(VMRegImpl::stack2reg(oop_slot)); __ add(SP, offset + STACK_BIAS, rHandle); diff --git a/src/cpu/sparc/vm/sparc.ad b/src/cpu/sparc/vm/sparc.ad index 701c161c4fb866965c64949f647a82efc0a40d09..3383bc21e0b424a90862ff6738d3e29df9cd67b6 100644 --- a/src/cpu/sparc/vm/sparc.ad +++ b/src/cpu/sparc/vm/sparc.ad @@ -1989,7 +1989,7 @@ void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) { // to implement the UseStrictFP mode. const bool Matcher::strict_fp_requires_explicit_rounding = false; -// Are floats conerted to double when stored to stack during deoptimization? +// Are floats converted to double when stored to stack during deoptimization? // Sparc does not handle callee-save floats. bool Matcher::float_in_double() { return false; } @@ -3218,7 +3218,7 @@ enc_class enc_Array_Equals(o0RegP ary1, o1RegP ary2, g3RegP tmp1, notemp_iRegI r // are owned by the CALLEE. Holes should not be nessecary in the // incoming area, as the Java calling convention is completely under // the control of the AD file. Doubles can be sorted and packed to -// avoid holes. Holes in the outgoing arguments may be nessecary for +// avoid holes. Holes in the outgoing arguments may be necessary for // varargs C calling conventions. // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is // even aligned with pad0 as needed. @@ -3284,7 +3284,7 @@ frame %{ %} // Body of function which returns an OptoRegs array locating - // arguments either in registers or in stack slots for callin + // arguments either in registers or in stack slots for calling // C. 
c_calling_convention %{ // This is obviously always outgoing diff --git a/src/share/vm/c1/c1_Canonicalizer.cpp b/src/share/vm/c1/c1_Canonicalizer.cpp index 932bfb30b9568bf66bd798a02e03890235c88895..bbc5f47778303beb0ab28935285e87210663d265 100644 --- a/src/share/vm/c1/c1_Canonicalizer.cpp +++ b/src/share/vm/c1/c1_Canonicalizer.cpp @@ -327,7 +327,7 @@ void Canonicalizer::do_ShiftOp (ShiftOp* x) { if (t2->is_constant()) { switch (t2->tag()) { case intTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return; - case longTag : if (t2->as_IntConstant()->value() == 0) set_canonical(x->x()); return; + case longTag : if (t2->as_LongConstant()->value() == (jlong)0) set_canonical(x->x()); return; default : ShouldNotReachHere(); } } @@ -808,28 +808,41 @@ void Canonicalizer::do_ExceptionObject(ExceptionObject* x) {} static bool match_index_and_scale(Instruction* instr, Instruction** index, - int* log2_scale, - Instruction** instr_to_unpin) { - *instr_to_unpin = NULL; - - // Skip conversion ops + int* log2_scale) { + // Skip conversion ops. This works only on 32bit because of the implicit l2i that the + // unsafe performs. +#ifndef _LP64 Convert* convert = instr->as_Convert(); - if (convert != NULL) { + if (convert != NULL && convert->op() == Bytecodes::_i2l) { + assert(convert->value()->type() == intType, "invalid input type"); instr = convert->value(); } +#endif ShiftOp* shift = instr->as_ShiftOp(); if (shift != NULL) { - if (shift->is_pinned()) { - *instr_to_unpin = shift; + if (shift->op() == Bytecodes::_lshl) { + assert(shift->x()->type() == longType, "invalid input type"); + } else { +#ifndef _LP64 + if (shift->op() == Bytecodes::_ishl) { + assert(shift->x()->type() == intType, "invalid input type"); + } else { + return false; + } +#else + return false; +#endif } + + // Constant shift value? Constant* con = shift->y()->as_Constant(); if (con == NULL) return false; // Well-known type and value? 
IntConstant* val = con->type()->as_IntConstant(); - if (val == NULL) return false; - if (shift->x()->type() != intType) return false; + assert(val != NULL, "Should be an int constant"); + *index = shift->x(); int tmp_scale = val->value(); if (tmp_scale >= 0 && tmp_scale < 4) { @@ -842,31 +855,42 @@ static bool match_index_and_scale(Instruction* instr, ArithmeticOp* arith = instr->as_ArithmeticOp(); if (arith != NULL) { - if (arith->is_pinned()) { - *instr_to_unpin = arith; + // See if either arg is a known constant + Constant* con = arith->x()->as_Constant(); + if (con != NULL) { + *index = arith->y(); + } else { + con = arith->y()->as_Constant(); + if (con == NULL) return false; + *index = arith->x(); } + long const_value; // Check for integer multiply - if (arith->op() == Bytecodes::_imul) { - // See if either arg is a known constant - Constant* con = arith->x()->as_Constant(); - if (con != NULL) { - *index = arith->y(); + if (arith->op() == Bytecodes::_lmul) { + assert((*index)->type() == longType, "invalid input type"); + LongConstant* val = con->type()->as_LongConstant(); + assert(val != NULL, "expecting a long constant"); + const_value = val->value(); + } else { +#ifndef _LP64 + if (arith->op() == Bytecodes::_imul) { + assert((*index)->type() == intType, "invalid input type"); + IntConstant* val = con->type()->as_IntConstant(); + assert(val != NULL, "expecting an int constant"); + const_value = val->value(); } else { - con = arith->y()->as_Constant(); - if (con == NULL) return false; - *index = arith->x(); - } - if ((*index)->type() != intType) return false; - // Well-known type and value? - IntConstant* val = con->type()->as_IntConstant(); - if (val == NULL) return false; - switch (val->value()) { - case 1: *log2_scale = 0; return true; - case 2: *log2_scale = 1; return true; - case 4: *log2_scale = 2; return true; - case 8: *log2_scale = 3; return true; - default: return false; + return false; } +#else + return false; +#endif + } + switch (const_value) { + case 1: *log2_scale = 0; return true; + case 2: *log2_scale = 1; return true; + case 4: *log2_scale = 2; return true; + case 8: *log2_scale = 3; return true; + default: return false; } } @@ -879,29 +903,37 @@ static bool match(UnsafeRawOp* x, Instruction** base, Instruction** index, int* log2_scale) { - Instruction* instr_to_unpin = NULL; ArithmeticOp* root = x->base()->as_ArithmeticOp(); if (root == NULL) return false; // Limit ourselves to addition for now if (root->op() != Bytecodes::_ladd) return false; + + bool match_found = false; // Try to find shift or scale op - if (match_index_and_scale(root->y(), index, log2_scale, &instr_to_unpin)) { + if (match_index_and_scale(root->y(), index, log2_scale)) { *base = root->x(); - } else if (match_index_and_scale(root->x(), index, log2_scale, &instr_to_unpin)) { + match_found = true; + } else if (match_index_and_scale(root->x(), index, log2_scale)) { *base = root->y(); - } else if (root->y()->as_Convert() != NULL) { + match_found = true; + } else if (NOT_LP64(root->y()->as_Convert() != NULL) LP64_ONLY(false)) { + // Skipping i2l works only on 32bit because of the implicit l2i that the unsafe performs. + // 64bit needs a real sign-extending conversion. 
Convert* convert = root->y()->as_Convert(); - if (convert->op() == Bytecodes::_i2l && convert->value()->type() == intType) { + if (convert->op() == Bytecodes::_i2l) { + assert(convert->value()->type() == intType, "should be an int"); // pick base and index, setting scale at 1 *base = root->x(); *index = convert->value(); *log2_scale = 0; - } else { - return false; + match_found = true; } - } else { - // doesn't match any expected sequences - return false; + } + // The default solution + if (!match_found) { + *base = root->x(); + *index = root->y(); + *log2_scale = 0; } // If the value is pinned then it will be always be computed so diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp index 890ccb7e68a38b2e58cf4c9925a66b88b16fa1cf..aefc1387b36ef0aa68309e5af12c13fe8f6b5cd8 100644 --- a/src/share/vm/c1/c1_LIRGenerator.cpp +++ b/src/share/vm/c1/c1_LIRGenerator.cpp @@ -2042,6 +2042,8 @@ void LIRGenerator::do_RoundFP(RoundFP* x) { } } +// Here UnsafeGetRaw may have x->base() and x->index() be int or long +// on both 64 and 32 bits. Expecting x->base() to be always long on 64bit. void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) { LIRItem base(x->base(), this); LIRItem idx(this); @@ -2056,50 +2058,73 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) { int log2_scale = 0; if (x->has_index()) { - assert(x->index()->type()->tag() == intTag, "should not find non-int index"); log2_scale = x->log2_scale(); } assert(!x->has_index() || idx.value() == x->index(), "should match"); LIR_Opr base_op = base.result(); + LIR_Opr index_op = idx.result(); #ifndef _LP64 if (x->base()->type()->tag() == longTag) { base_op = new_register(T_INT); __ convert(Bytecodes::_l2i, base.result(), base_op); - } else { - assert(x->base()->type()->tag() == intTag, "must be"); } + if (x->has_index()) { + if (x->index()->type()->tag() == longTag) { + LIR_Opr long_index_op = index_op; + if (x->index()->type()->is_constant()) { + long_index_op = new_register(T_LONG); + __ move(index_op, long_index_op); + } + index_op = new_register(T_INT); + __ convert(Bytecodes::_l2i, long_index_op, index_op); + } else { + assert(x->index()->type()->tag() == intTag, "must be"); + } + } + // At this point base and index should be all ints. + assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int"); + assert(!x->has_index() || index_op->type() == T_INT, "index should be an int"); +#else + if (x->has_index()) { + if (x->index()->type()->tag() == intTag) { + if (!x->index()->type()->is_constant()) { + index_op = new_register(T_LONG); + __ convert(Bytecodes::_i2l, idx.result(), index_op); + } + } else { + assert(x->index()->type()->tag() == longTag, "must be"); + if (x->index()->type()->is_constant()) { + index_op = new_register(T_LONG); + __ move(idx.result(), index_op); + } + } + } + // At this point base is a long non-constant + // Index is a long register or a int constant. + // We allow the constant to stay an int because that would allow us a more compact encoding by + // embedding an immediate offset in the address expression. If we have a long constant, we have to + // move it into a register first. 
+ assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a long non-constant"); + assert(!x->has_index() || (index_op->type() == T_INT && index_op->is_constant()) || + (index_op->type() == T_LONG && !index_op->is_constant()), "unexpected index type"); #endif BasicType dst_type = x->basic_type(); - LIR_Opr index_op = idx.result(); LIR_Address* addr; if (index_op->is_constant()) { assert(log2_scale == 0, "must not have a scale"); + assert(index_op->type() == T_INT, "only int constants supported"); addr = new LIR_Address(base_op, index_op->as_jint(), dst_type); } else { #ifdef X86 -#ifdef _LP64 - if (!index_op->is_illegal() && index_op->type() == T_INT) { - LIR_Opr tmp = new_pointer_register(); - __ convert(Bytecodes::_i2l, index_op, tmp); - index_op = tmp; - } -#endif addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type); #elif defined(ARM) addr = generate_address(base_op, index_op, log2_scale, 0, dst_type); #else if (index_op->is_illegal() || log2_scale == 0) { -#ifdef _LP64 - if (!index_op->is_illegal() && index_op->type() == T_INT) { - LIR_Opr tmp = new_pointer_register(); - __ convert(Bytecodes::_i2l, index_op, tmp); - index_op = tmp; - } -#endif addr = new LIR_Address(base_op, index_op, dst_type); } else { LIR_Opr tmp = new_pointer_register(); @@ -2126,7 +2151,6 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) { BasicType type = x->basic_type(); if (x->has_index()) { - assert(x->index()->type()->tag() == intTag, "should not find non-int index"); log2_scale = x->log2_scale(); } @@ -2149,38 +2173,39 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) { set_no_result(x); LIR_Opr base_op = base.result(); + LIR_Opr index_op = idx.result(); + #ifndef _LP64 if (x->base()->type()->tag() == longTag) { base_op = new_register(T_INT); __ convert(Bytecodes::_l2i, base.result(), base_op); - } else { - assert(x->base()->type()->tag() == intTag, "must be"); } + if (x->has_index()) { + if (x->index()->type()->tag() == longTag) { + index_op = new_register(T_INT); + __ convert(Bytecodes::_l2i, idx.result(), index_op); + } + } + // At this point base and index should be all ints and not constants + assert(base_op->type() == T_INT && !base_op->is_constant(), "base should be an non-constant int"); + assert(!x->has_index() || (index_op->type() == T_INT && !index_op->is_constant()), "index should be an non-constant int"); +#else + if (x->has_index()) { + if (x->index()->type()->tag() == intTag) { + index_op = new_register(T_LONG); + __ convert(Bytecodes::_i2l, idx.result(), index_op); + } + } + // At this point base and index are long and non-constant + assert(base_op->type() == T_LONG && !base_op->is_constant(), "base must be a non-constant long"); + assert(!x->has_index() || (index_op->type() == T_LONG && !index_op->is_constant()), "index must be a non-constant long"); #endif - LIR_Opr index_op = idx.result(); if (log2_scale != 0) { // temporary fix (platform dependent code without shift on Intel would be better) - index_op = new_pointer_register(); -#ifdef _LP64 - if(idx.result()->type() == T_INT) { - __ convert(Bytecodes::_i2l, idx.result(), index_op); - } else { -#endif - // TODO: ARM also allows embedded shift in the address - __ move(idx.result(), index_op); -#ifdef _LP64 - } -#endif + // TODO: ARM also allows embedded shift in the address __ shift_left(index_op, log2_scale, index_op); } -#ifdef _LP64 - else if(!index_op->is_illegal() && index_op->type() == T_INT) { - LIR_Opr tmp = new_pointer_register(); - __ convert(Bytecodes::_i2l, 
index_op, tmp); - index_op = tmp; - } -#endif LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type()); __ move(value.result(), addr); diff --git a/src/share/vm/classfile/symbolTable.cpp b/src/share/vm/classfile/symbolTable.cpp index 5bbe1e26914267ee038d8d82cd4839ab06668f01..38868b229df156d197c62feed4ef24d1a3cd23c0 100644 --- a/src/share/vm/classfile/symbolTable.cpp +++ b/src/share/vm/classfile/symbolTable.cpp @@ -205,7 +205,7 @@ Symbol* SymbolTable::lookup(int index, const char* name, } } // If the bucket size is too deep check if this hash code is insufficient. - if (count >= BasicHashtable::rehash_count && !needs_rehashing()) { + if (count >= rehash_count && !needs_rehashing()) { _needs_rehashing = check_rehash_table(count); } return NULL; @@ -656,7 +656,7 @@ oop StringTable::lookup(int index, jchar* name, } } // If the bucket size is too deep check if this hash code is insufficient. - if (count >= BasicHashtable::rehash_count && !needs_rehashing()) { + if (count >= rehash_count && !needs_rehashing()) { _needs_rehashing = check_rehash_table(count); } return NULL; diff --git a/src/share/vm/classfile/symbolTable.hpp b/src/share/vm/classfile/symbolTable.hpp index b0a2fcb38b8f7537c27bf787f77b5416f694ca61..d6584403788690ef63c4c5b0957a12c9c13063bf 100644 --- a/src/share/vm/classfile/symbolTable.hpp +++ b/src/share/vm/classfile/symbolTable.hpp @@ -74,7 +74,7 @@ class TempNewSymbol : public StackObj { operator Symbol*() { return _temp; } }; -class SymbolTable : public Hashtable { +class SymbolTable : public RehashableHashtable { friend class VMStructs; friend class ClassFileParser; @@ -110,10 +110,10 @@ private: Symbol* lookup(int index, const char* name, int len, unsigned int hash); SymbolTable() - : Hashtable(SymbolTableSize, sizeof (HashtableEntry)) {} + : RehashableHashtable(SymbolTableSize, sizeof (HashtableEntry)) {} SymbolTable(HashtableBucket* t, int number_of_entries) - : Hashtable(SymbolTableSize, sizeof (HashtableEntry), t, + : RehashableHashtable(SymbolTableSize, sizeof (HashtableEntry), t, number_of_entries) {} // Arena for permanent symbols (null class loader) that are never unloaded @@ -252,7 +252,7 @@ public: static int parallel_claimed_index() { return _parallel_claimed_idx; } }; -class StringTable : public Hashtable { +class StringTable : public RehashableHashtable { friend class VMStructs; private: @@ -278,11 +278,11 @@ private: // in the range [start_idx, end_idx). static void buckets_unlink_or_oops_do(BoolObjectClosure* is_alive, OopClosure* f, int start_idx, int end_idx, int* processed, int* removed); - StringTable() : Hashtable((int)StringTableSize, + StringTable() : RehashableHashtable((int)StringTableSize, sizeof (HashtableEntry)) {} StringTable(HashtableBucket* t, int number_of_entries) - : Hashtable((int)StringTableSize, sizeof (HashtableEntry), t, + : RehashableHashtable((int)StringTableSize, sizeof (HashtableEntry), t, number_of_entries) {} public: // The string table diff --git a/src/share/vm/compiler/compileBroker.cpp b/src/share/vm/compiler/compileBroker.cpp index 89563ef69ceb0cd6fee9520495242a3d13cd7d46..848e7053e875ec00f03bde81f1425b19e690075b 100644 --- a/src/share/vm/compiler/compileBroker.cpp +++ b/src/share/vm/compiler/compileBroker.cpp @@ -1175,6 +1175,12 @@ void CompileBroker::compile_method_base(methodHandle method, return; } + if (TieredCompilation) { + // Tiered policy requires MethodCounters to exist before adding a method to + // the queue. Create if we don't have them yet. 
+ method->get_method_counters(thread); + } + // Outputs from the following MutexLocker block: CompileTask* task = NULL; bool blocking = false; diff --git a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp index 159a445b325dc2e3475163433a9f51066f92efd3..c7cad45fe7d6cc56f51816e8d2a0c4d20dee7fb4 100644 --- a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp +++ b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.cpp @@ -22,372 +22,386 @@ * */ - #include "precompiled.hpp" +#include "code/codeCache.hpp" #include "code/nmethod.hpp" #include "gc_implementation/g1/g1CodeCacheRemSet.hpp" +#include "gc_implementation/g1/heapRegion.hpp" +#include "memory/heap.hpp" #include "memory/iterator.hpp" +#include "oops/oop.inline.hpp" +#include "utilities/hashtable.inline.hpp" +#include "utilities/stack.inline.hpp" PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC -G1CodeRootChunk::G1CodeRootChunk() : _top(NULL), _next(NULL), _prev(NULL), _free(NULL) { - _top = bottom(); -} +class CodeRootSetTable : public Hashtable { + friend class G1CodeRootSetTest; + typedef HashtableEntry Entry; -void G1CodeRootChunk::reset() { - _next = _prev = NULL; - _free = NULL; - _top = bottom(); -} + static CodeRootSetTable* volatile _purge_list; -void G1CodeRootChunk::nmethods_do(CodeBlobClosure* cl) { - NmethodOrLink* cur = bottom(); - while (cur != _top) { - if (is_nmethod(cur)) { - cl->do_code_blob(cur->_nmethod); - } - cur++; + CodeRootSetTable* _purge_next; + + unsigned int compute_hash(nmethod* nm) { + uintptr_t hash = (uintptr_t)nm; + return hash ^ (hash >> 7); // code heap blocks are 128byte aligned } -} -bool G1CodeRootChunk::remove_lock_free(nmethod* method) { - NmethodOrLink* cur = bottom(); + void remove_entry(Entry* e, Entry* previous); + Entry* new_entry(nmethod* nm); - for (NmethodOrLink* cur = bottom(); cur != _top; cur++) { - if (cur->_nmethod == method) { - bool result = Atomic::cmpxchg_ptr(NULL, &cur->_nmethod, method) == method; + public: + CodeRootSetTable(int size) : Hashtable(size, sizeof(Entry)), _purge_next(NULL) {} + ~CodeRootSetTable(); - if (!result) { - // Someone else cleared out this entry. - return false; - } + // Needs to be protected locks + bool add(nmethod* nm); + bool remove(nmethod* nm); - // The method was cleared. Time to link it into the free list. 
- NmethodOrLink* prev_free; - do { - prev_free = (NmethodOrLink*)_free; - cur->_link = prev_free; - } while (Atomic::cmpxchg_ptr(cur, &_free, prev_free) != prev_free); + // Can be called without locking + bool contains(nmethod* nm); - return true; - } - } + int entry_size() const { return BasicHashtable::entry_size(); } - return false; -} + void copy_to(CodeRootSetTable* new_table); + void nmethods_do(CodeBlobClosure* blk); -G1CodeRootChunkManager::G1CodeRootChunkManager() : _free_list(), _num_chunks_handed_out(0) { - _free_list.initialize(); - _free_list.set_size(G1CodeRootChunk::word_size()); -} + template + int remove_if(CB& should_remove); -size_t G1CodeRootChunkManager::fl_mem_size() { - return _free_list.count() * _free_list.size(); -} + static void purge_list_append(CodeRootSetTable* tbl); + static void purge(); -void G1CodeRootChunkManager::free_all_chunks(FreeList* list) { - _num_chunks_handed_out -= list->count(); - _free_list.prepend(list); -} + static size_t static_mem_size() { + return sizeof(_purge_list); + } +}; -void G1CodeRootChunkManager::free_chunk(G1CodeRootChunk* chunk) { - _free_list.return_chunk_at_head(chunk); - _num_chunks_handed_out--; -} +CodeRootSetTable* volatile CodeRootSetTable::_purge_list = NULL; -void G1CodeRootChunkManager::purge_chunks(size_t keep_ratio) { - size_t keep = _num_chunks_handed_out * keep_ratio / 100; - if (keep >= (size_t)_free_list.count()) { - return; +CodeRootSetTable::Entry* CodeRootSetTable::new_entry(nmethod* nm) { + unsigned int hash = compute_hash(nm); + Entry* entry = (Entry*) new_entry_free_list(); + if (entry == NULL) { + entry = (Entry*) NEW_C_HEAP_ARRAY2(char, entry_size(), mtGC, CURRENT_PC); } + entry->set_next(NULL); + entry->set_hash(hash); + entry->set_literal(nm); + return entry; +} - FreeList temp; - temp.initialize(); - temp.set_size(G1CodeRootChunk::word_size()); - - _free_list.getFirstNChunksFromList((size_t)_free_list.count() - keep, &temp); +void CodeRootSetTable::remove_entry(Entry* e, Entry* previous) { + int index = hash_to_index(e->hash()); + assert((e == bucket(index)) == (previous == NULL), "if e is the first entry then previous should be null"); - G1CodeRootChunk* cur = temp.get_chunk_at_head(); - while (cur != NULL) { - delete cur; - cur = temp.get_chunk_at_head(); + if (previous == NULL) { + set_entry(index, e->next()); + } else { + previous->set_next(e->next()); } + free_entry(e); } -size_t G1CodeRootChunkManager::static_mem_size() { - return sizeof(G1CodeRootChunkManager); +CodeRootSetTable::~CodeRootSetTable() { + for (int index = 0; index < table_size(); ++index) { + for (Entry* e = bucket(index); e != NULL; ) { + Entry* to_remove = e; + // read next before freeing. 
+ e = e->next(); + unlink_entry(to_remove); + FREE_C_HEAP_ARRAY(char, to_remove, mtGC); + } + } + assert(number_of_entries() == 0, "should have removed all entries"); + free_buckets(); + for (BasicHashtableEntry* e = new_entry_free_list(); e != NULL; e = new_entry_free_list()) { + FREE_C_HEAP_ARRAY(char, e, mtGC); + } } - -G1CodeRootChunk* G1CodeRootChunkManager::new_chunk() { - G1CodeRootChunk* result = _free_list.get_chunk_at_head(); - if (result == NULL) { - result = new G1CodeRootChunk(); +bool CodeRootSetTable::add(nmethod* nm) { + if (!contains(nm)) { + Entry* e = new_entry(nm); + int index = hash_to_index(e->hash()); + add_entry(index, e); + return true; } - _num_chunks_handed_out++; - result->reset(); - return result; + return false; } -#ifndef PRODUCT +bool CodeRootSetTable::contains(nmethod* nm) { + int index = hash_to_index(compute_hash(nm)); + for (Entry* e = bucket(index); e != NULL; e = e->next()) { + if (e->literal() == nm) { + return true; + } + } + return false; +} -size_t G1CodeRootChunkManager::num_chunks_handed_out() const { - return _num_chunks_handed_out; +bool CodeRootSetTable::remove(nmethod* nm) { + int index = hash_to_index(compute_hash(nm)); + Entry* previous = NULL; + for (Entry* e = bucket(index); e != NULL; previous = e, e = e->next()) { + if (e->literal() == nm) { + remove_entry(e, previous); + return true; + } + } + return false; } -size_t G1CodeRootChunkManager::num_free_chunks() const { - return (size_t)_free_list.count(); +void CodeRootSetTable::copy_to(CodeRootSetTable* new_table) { + for (int index = 0; index < table_size(); ++index) { + for (Entry* e = bucket(index); e != NULL; e = e->next()) { + new_table->add(e->literal()); + } + } + new_table->copy_freelist(this); } -#endif +void CodeRootSetTable::nmethods_do(CodeBlobClosure* blk) { + for (int index = 0; index < table_size(); ++index) { + for (Entry* e = bucket(index); e != NULL; e = e->next()) { + blk->do_code_blob(e->literal()); + } + } +} -G1CodeRootChunkManager G1CodeRootSet::_default_chunk_manager; +template +int CodeRootSetTable::remove_if(CB& should_remove) { + int num_removed = 0; + for (int index = 0; index < table_size(); ++index) { + Entry* previous = NULL; + Entry* e = bucket(index); + while (e != NULL) { + Entry* next = e->next(); + if (should_remove(e->literal())) { + remove_entry(e, previous); + ++num_removed; + } else { + previous = e; + } + e = next; + } + } + return num_removed; +} -void G1CodeRootSet::purge_chunks(size_t keep_ratio) { - _default_chunk_manager.purge_chunks(keep_ratio); +G1CodeRootSet::~G1CodeRootSet() { + delete _table; } -size_t G1CodeRootSet::free_chunks_static_mem_size() { - return _default_chunk_manager.static_mem_size(); +CodeRootSetTable* G1CodeRootSet::load_acquire_table() { + return (CodeRootSetTable*) OrderAccess::load_ptr_acquire(&_table); } -size_t G1CodeRootSet::free_chunks_mem_size() { - return _default_chunk_manager.fl_mem_size(); +void G1CodeRootSet::allocate_small_table() { + _table = new CodeRootSetTable(SmallSize); } -G1CodeRootSet::G1CodeRootSet(G1CodeRootChunkManager* manager) : _manager(manager), _list(), _length(0) { - if (_manager == NULL) { - _manager = &_default_chunk_manager; +void CodeRootSetTable::purge_list_append(CodeRootSetTable* table) { + for (;;) { + table->_purge_next = _purge_list; + CodeRootSetTable* old = (CodeRootSetTable*) Atomic::cmpxchg_ptr(table, &_purge_list, table->_purge_next); + if (old == table->_purge_next) { + break; + } } - _list.initialize(); - _list.set_size(G1CodeRootChunk::word_size()); } 
-G1CodeRootSet::~G1CodeRootSet() { - clear(); +void CodeRootSetTable::purge() { + CodeRootSetTable* table = _purge_list; + _purge_list = NULL; + while (table != NULL) { + CodeRootSetTable* to_purge = table; + table = table->_purge_next; + delete to_purge; + } } -void G1CodeRootSet::add(nmethod* method) { - if (!contains(method)) { - // Find the first chunk that isn't full. - G1CodeRootChunk* cur = _list.head(); - while (cur != NULL) { - if (!cur->is_full()) { - break; - } - cur = cur->next(); - } +void G1CodeRootSet::move_to_large() { + CodeRootSetTable* temp = new CodeRootSetTable(LargeSize); - // All chunks are full, get a new chunk. - if (cur == NULL) { - cur = new_chunk(); - _list.return_chunk_at_head(cur); - } + _table->copy_to(temp); + + CodeRootSetTable::purge_list_append(_table); - // Add the nmethod. - bool result = cur->add(method); + OrderAccess::release_store_ptr(&_table, temp); +} - guarantee(result, err_msg("Not able to add nmethod "PTR_FORMAT" to newly allocated chunk.", method)); - _length++; - } +void G1CodeRootSet::purge() { + CodeRootSetTable::purge(); } -void G1CodeRootSet::remove_lock_free(nmethod* method) { - G1CodeRootChunk* found = find(method); - if (found != NULL) { - bool result = found->remove_lock_free(method); - if (result) { - Atomic::dec_ptr((volatile intptr_t*)&_length); - } - } - assert(!contains(method), err_msg(PTR_FORMAT" still contains nmethod "PTR_FORMAT, this, method)); +size_t G1CodeRootSet::static_mem_size() { + return CodeRootSetTable::static_mem_size(); } -nmethod* G1CodeRootSet::pop() { - while (true) { - G1CodeRootChunk* cur = _list.head(); - if (cur == NULL) { - assert(_length == 0, "when there are no chunks, there should be no elements"); - return NULL; - } - nmethod* result = cur->pop(); - if (result != NULL) { - _length--; - return result; - } else { - free(_list.get_chunk_at_head()); - } +void G1CodeRootSet::add(nmethod* method) { + bool added = false; + if (is_empty()) { + allocate_small_table(); + } + added = _table->add(method); + if (_length == Threshold) { + move_to_large(); + } + if (added) { + ++_length; } } -G1CodeRootChunk* G1CodeRootSet::find(nmethod* method) { - G1CodeRootChunk* cur = _list.head(); - while (cur != NULL) { - if (cur->contains(method)) { - return cur; +bool G1CodeRootSet::remove(nmethod* method) { + bool removed = false; + if (_table != NULL) { + removed = _table->remove(method); + } + if (removed) { + _length--; + if (_length == 0) { + clear(); } - cur = (G1CodeRootChunk*)cur->next(); } - return NULL; -} - -void G1CodeRootSet::free(G1CodeRootChunk* chunk) { - free_chunk(chunk); + return removed; } bool G1CodeRootSet::contains(nmethod* method) { - return find(method) != NULL; + CodeRootSetTable* table = load_acquire_table(); + if (table != NULL) { + return table->contains(method); + } + return false; } void G1CodeRootSet::clear() { - free_all_chunks(&_list); + delete _table; + _table = NULL; _length = 0; } +size_t G1CodeRootSet::mem_size() { + return sizeof(*this) + + (_table != NULL ? 
sizeof(CodeRootSetTable) + _table->entry_size() * _length : 0); +} + void G1CodeRootSet::nmethods_do(CodeBlobClosure* blk) const { - G1CodeRootChunk* cur = _list.head(); - while (cur != NULL) { - cur->nmethods_do(blk); - cur = (G1CodeRootChunk*)cur->next(); + if (_table != NULL) { + _table->nmethods_do(blk); } } -size_t G1CodeRootSet::static_mem_size() { - return sizeof(G1CodeRootSet); -} +class CleanCallback : public StackObj { + class PointsIntoHRDetectionClosure : public OopClosure { + HeapRegion* _hr; + public: + bool _points_into; + PointsIntoHRDetectionClosure(HeapRegion* hr) : _hr(hr), _points_into(false) {} -size_t G1CodeRootSet::mem_size() { - return G1CodeRootSet::static_mem_size() + _list.count() * _list.size(); -} + void do_oop(narrowOop* o) { + do_oop_work(o); + } -#ifndef PRODUCT + void do_oop(oop* o) { + do_oop_work(o); + } -void G1CodeRootSet::test() { - G1CodeRootChunkManager mgr; + template + void do_oop_work(T* p) { + if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) { + _points_into = true; + } + } + }; - assert(mgr.num_chunks_handed_out() == 0, "Must not have handed out chunks yet"); + PointsIntoHRDetectionClosure _detector; + CodeBlobToOopClosure _blobs; - assert(G1CodeRootChunkManager::static_mem_size() > sizeof(void*), - err_msg("The chunk manager's static memory usage seems too small, is only "SIZE_FORMAT" bytes.", G1CodeRootChunkManager::static_mem_size())); + public: + CleanCallback(HeapRegion* hr) : _detector(hr), _blobs(&_detector, !CodeBlobToOopClosure::FixRelocations) {} - // The number of chunks that we allocate for purge testing. - size_t const num_chunks = 10; + bool operator() (nmethod* nm) { + _detector._points_into = false; + _blobs.do_code_blob(nm); + return !_detector._points_into; + } +}; + +void G1CodeRootSet::clean(HeapRegion* owner) { + CleanCallback should_clean(owner); + if (_table != NULL) { + int removed = _table->remove_if(should_clean); + assert((size_t)removed <= _length, "impossible"); + _length -= removed; + } + if (_length == 0) { + clear(); + } +} - { - G1CodeRootSet set1(&mgr); - assert(set1.is_empty(), "Code root set must be initially empty but is not."); +#ifndef PRODUCT - assert(G1CodeRootSet::static_mem_size() > sizeof(void*), - err_msg("The code root set's static memory usage seems too small, is only "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size())); +class G1CodeRootSetTest { + public: + static void test() { + { + G1CodeRootSet set1; + assert(set1.is_empty(), "Code root set must be initially empty but is not."); - set1.add((nmethod*)1); - assert(mgr.num_chunks_handed_out() == 1, - err_msg("Must have allocated and handed out one chunk, but handed out " - SIZE_FORMAT" chunks", mgr.num_chunks_handed_out())); - assert(set1.length() == 1, err_msg("Added exactly one element, but set contains " - SIZE_FORMAT" elements", set1.length())); + assert(G1CodeRootSet::static_mem_size() == sizeof(void*), + err_msg("The code root set's static memory usage is incorrect, "SIZE_FORMAT" bytes", G1CodeRootSet::static_mem_size())); - // G1CodeRootChunk::word_size() is larger than G1CodeRootChunk::num_entries which - // we cannot access. 
- for (uint i = 0; i < G1CodeRootChunk::word_size() + 1; i++) { set1.add((nmethod*)1); - } - assert(mgr.num_chunks_handed_out() == 1, - err_msg("Duplicate detection must have prevented allocation of further " - "chunks but allocated "SIZE_FORMAT, mgr.num_chunks_handed_out())); - assert(set1.length() == 1, - err_msg("Duplicate detection should not have increased the set size but " - "is "SIZE_FORMAT, set1.length())); - - size_t num_total_after_add = G1CodeRootChunk::word_size() + 1; - for (size_t i = 0; i < num_total_after_add - 1; i++) { - set1.add((nmethod*)(uintptr_t)(2 + i)); - } - assert(mgr.num_chunks_handed_out() > 1, - "After adding more code roots, more than one additional chunk should have been handed out"); - assert(set1.length() == num_total_after_add, - err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they " - "need to be in the set, but there are only "SIZE_FORMAT, - num_total_after_add, set1.length())); - - size_t num_popped = 0; - while (set1.pop() != NULL) { - num_popped++; - } - assert(num_popped == num_total_after_add, - err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" " - "were added", num_popped, num_total_after_add)); - assert(mgr.num_chunks_handed_out() == 0, - err_msg("After popping all elements, all chunks must have been returned " - "but there are still "SIZE_FORMAT" additional", mgr.num_chunks_handed_out())); - - mgr.purge_chunks(0); - assert(mgr.num_free_chunks() == 0, - err_msg("After purging everything, the free list must be empty but still " - "contains "SIZE_FORMAT" chunks", mgr.num_free_chunks())); - - // Add some more handed out chunks. - size_t i = 0; - while (mgr.num_chunks_handed_out() < num_chunks) { - set1.add((nmethod*)i); - i++; - } + assert(set1.length() == 1, err_msg("Added exactly one element, but set contains " + SIZE_FORMAT" elements", set1.length())); - { - // Generate chunks on the free list. - G1CodeRootSet set2(&mgr); - size_t i = 0; - while (mgr.num_chunks_handed_out() < (num_chunks * 2)) { - set2.add((nmethod*)i); - i++; + const size_t num_to_add = (size_t)G1CodeRootSet::Threshold + 1; + + for (size_t i = 1; i <= num_to_add; i++) { + set1.add((nmethod*)1); + } + assert(set1.length() == 1, + err_msg("Duplicate detection should not have increased the set size but " + "is "SIZE_FORMAT, set1.length())); + + for (size_t i = 2; i <= num_to_add; i++) { + set1.add((nmethod*)(uintptr_t)(i)); + } + assert(set1.length() == num_to_add, + err_msg("After adding in total "SIZE_FORMAT" distinct code roots, they " + "need to be in the set, but there are only "SIZE_FORMAT, + num_to_add, set1.length())); + + assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable"); + + size_t num_popped = 0; + for (size_t i = 1; i <= num_to_add; i++) { + bool removed = set1.remove((nmethod*)i); + if (removed) { + num_popped += 1; + } else { + break; + } } - // Exit of the scope of the set2 object will call the destructor that generates - // num_chunks elements on the free list. 
+ assert(num_popped == num_to_add, + err_msg("Managed to pop "SIZE_FORMAT" code roots, but only "SIZE_FORMAT" " + "were added", num_popped, num_to_add)); + assert(CodeRootSetTable::_purge_list != NULL, "should have grown to large hashtable"); + + G1CodeRootSet::purge(); + + assert(CodeRootSetTable::_purge_list == NULL, "should have purged old small tables"); + } - assert(mgr.num_chunks_handed_out() == num_chunks, - err_msg("Deletion of the second set must have resulted in giving back " - "those, but there are still "SIZE_FORMAT" additional handed out, expecting " - SIZE_FORMAT, mgr.num_chunks_handed_out(), num_chunks)); - assert(mgr.num_free_chunks() == num_chunks, - err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list " - "but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks())); - - size_t const test_percentage = 50; - mgr.purge_chunks(test_percentage); - assert(mgr.num_chunks_handed_out() == num_chunks, - err_msg("Purging must not hand out chunks but there are "SIZE_FORMAT, - mgr.num_chunks_handed_out())); - assert(mgr.num_free_chunks() == (size_t)(mgr.num_chunks_handed_out() * test_percentage / 100), - err_msg("Must have purged "SIZE_FORMAT" percent of "SIZE_FORMAT" chunks" - "but there are "SIZE_FORMAT, test_percentage, num_chunks, - mgr.num_free_chunks())); - // Purge the remainder of the chunks on the free list. - mgr.purge_chunks(0); - assert(mgr.num_free_chunks() == 0, "Free List must be empty"); - assert(mgr.num_chunks_handed_out() == num_chunks, - err_msg("Expected to be "SIZE_FORMAT" chunks handed out from the first set " - "but there are "SIZE_FORMAT, num_chunks, mgr.num_chunks_handed_out())); - - // Exit of the scope of the set1 object will call the destructor that generates - // num_chunks additional elements on the free list. - } - - assert(mgr.num_chunks_handed_out() == 0, - err_msg("Deletion of the only set must have resulted in no chunks handed " - "out, but there is still "SIZE_FORMAT" handed out", mgr.num_chunks_handed_out())); - assert(mgr.num_free_chunks() == num_chunks, - err_msg("After freeing "SIZE_FORMAT" chunks, they must be on the free list " - "but there are only "SIZE_FORMAT, num_chunks, mgr.num_free_chunks())); - - // Restore initial state. - mgr.purge_chunks(0); - assert(mgr.num_free_chunks() == 0, "Free List must be empty"); - assert(mgr.num_chunks_handed_out() == 0, "No additional elements must have been handed out yet"); -} + } +}; void TestCodeCacheRemSet_test() { - G1CodeRootSet::test(); + G1CodeRootSetTest::test(); } + #endif diff --git a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp index c351330f12f0dcc0cc67d24814f13965b83a3efe..87eb52a5b64152d7cb7adbeee0f9262c9ce54e84 100644 --- a/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp +++ b/src/share/vm/gc_implementation/g1/g1CodeCacheRemSet.hpp @@ -26,222 +26,64 @@ #define SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP #include "memory/allocation.hpp" -#include "memory/freeList.hpp" -#include "runtime/globals.hpp" class CodeBlobClosure; - -// The elements of the G1CodeRootChunk is either: -// 1) nmethod pointers -// 2) nodes in an internally chained free list -typedef union { - nmethod* _nmethod; - void* _link; -} NmethodOrLink; - -class G1CodeRootChunk : public CHeapObj { - private: - static const int NUM_ENTRIES = 32; - public: - G1CodeRootChunk* _next; - G1CodeRootChunk* _prev; - - NmethodOrLink* _top; - // First free position within the chunk. 
- volatile NmethodOrLink* _free; - - NmethodOrLink _data[NUM_ENTRIES]; - - NmethodOrLink* bottom() const { - return (NmethodOrLink*) &(_data[0]); - } - - NmethodOrLink* end() const { - return (NmethodOrLink*) &(_data[NUM_ENTRIES]); - } - - bool is_link(NmethodOrLink* nmethod_or_link) { - return nmethod_or_link->_link == NULL || - (bottom() <= nmethod_or_link->_link - && nmethod_or_link->_link < end()); - } - - bool is_nmethod(NmethodOrLink* nmethod_or_link) { - return !is_link(nmethod_or_link); - } - - public: - G1CodeRootChunk(); - ~G1CodeRootChunk() {} - - static size_t word_size() { return (size_t)(align_size_up_(sizeof(G1CodeRootChunk), HeapWordSize) / HeapWordSize); } - - // FreeList "interface" methods - - G1CodeRootChunk* next() const { return _next; } - G1CodeRootChunk* prev() const { return _prev; } - void set_next(G1CodeRootChunk* v) { _next = v; assert(v != this, "Boom");} - void set_prev(G1CodeRootChunk* v) { _prev = v; assert(v != this, "Boom");} - void clear_next() { set_next(NULL); } - void clear_prev() { set_prev(NULL); } - - size_t size() const { return word_size(); } - - void link_next(G1CodeRootChunk* ptr) { set_next(ptr); } - void link_prev(G1CodeRootChunk* ptr) { set_prev(ptr); } - void link_after(G1CodeRootChunk* ptr) { - link_next(ptr); - if (ptr != NULL) ptr->link_prev((G1CodeRootChunk*)this); - } - - bool is_free() { return true; } - - // New G1CodeRootChunk routines - - void reset(); - - bool is_empty() const { - return _top == bottom(); - } - - bool is_full() const { - return _top == end() && _free == NULL; - } - - bool contains(nmethod* method) { - NmethodOrLink* cur = bottom(); - while (cur != _top) { - if (cur->_nmethod == method) return true; - cur++; - } - return false; - } - - bool add(nmethod* method) { - if (is_full()) { - return false; - } - - if (_free != NULL) { - // Take from internally chained free list - NmethodOrLink* first_free = (NmethodOrLink*)_free; - _free = (NmethodOrLink*)_free->_link; - first_free->_nmethod = method; - } else { - // Take from top. - _top->_nmethod = method; - _top++; - } - - return true; - } - - bool remove_lock_free(nmethod* method); - - void nmethods_do(CodeBlobClosure* blk); - - nmethod* pop() { - if (_free != NULL) { - // Kill the free list. - _free = NULL; - } - - while (!is_empty()) { - _top--; - if (is_nmethod(_top)) { - return _top->_nmethod; - } - } - - return NULL; - } -}; - -// Manages free chunks. -class G1CodeRootChunkManager VALUE_OBJ_CLASS_SPEC { - private: - // Global free chunk list management - FreeList _free_list; - // Total number of chunks handed out - size_t _num_chunks_handed_out; - - public: - G1CodeRootChunkManager(); - - G1CodeRootChunk* new_chunk(); - void free_chunk(G1CodeRootChunk* chunk); - // Free all elements of the given list. - void free_all_chunks(FreeList* list); - - void initialize(); - void purge_chunks(size_t keep_ratio); - - static size_t static_mem_size(); - size_t fl_mem_size(); - -#ifndef PRODUCT - size_t num_chunks_handed_out() const; - size_t num_free_chunks() const; -#endif -}; +class CodeRootSetTable; +class HeapRegion; +class nmethod; // Implements storage for a set of code roots. // All methods that modify the set are not thread-safe except if otherwise noted. class G1CodeRootSet VALUE_OBJ_CLASS_SPEC { + friend class G1CodeRootSetTest; private: - // Global default free chunk manager instance. 
- static G1CodeRootChunkManager _default_chunk_manager; - G1CodeRootChunk* new_chunk() { return _manager->new_chunk(); } - void free_chunk(G1CodeRootChunk* chunk) { _manager->free_chunk(chunk); } - // Free all elements of the given list. - void free_all_chunks(FreeList* list) { _manager->free_all_chunks(list); } + const static size_t SmallSize = 32; + const static size_t Threshold = 24; + const static size_t LargeSize = 512; - // Return the chunk that contains the given nmethod, NULL otherwise. - // Scans the list of chunks backwards, as this method is used to add new - // entries, which are typically added in bulk for a single nmethod. - G1CodeRootChunk* find(nmethod* method); - void free(G1CodeRootChunk* chunk); + CodeRootSetTable* _table; + CodeRootSetTable* load_acquire_table(); size_t _length; - FreeList _list; - G1CodeRootChunkManager* _manager; + + void move_to_large(); + void allocate_small_table(); public: - // If an instance is initialized with a chunk manager of NULL, use the global - // default one. - G1CodeRootSet(G1CodeRootChunkManager* manager = NULL); + G1CodeRootSet() : _table(NULL), _length(0) {} ~G1CodeRootSet(); - static void purge_chunks(size_t keep_ratio); + static void purge(); - static size_t free_chunks_static_mem_size(); - static size_t free_chunks_mem_size(); + static size_t static_mem_size(); - // Search for the code blob from the recently allocated ones to find duplicates more quickly, as this - // method is likely to be repeatedly called with the same nmethod. void add(nmethod* method); - void remove_lock_free(nmethod* method); - nmethod* pop(); + bool remove(nmethod* method); + // Safe to call without synchronization, but may return false negatives. bool contains(nmethod* method); void clear(); void nmethods_do(CodeBlobClosure* blk) const; - bool is_empty() { return length() == 0; } + // Remove all nmethods which no longer contain pointers into our "owner" region + void clean(HeapRegion* owner); + + bool is_empty() { + bool empty = length() == 0; + assert(empty == (_table == NULL), "is empty only if table is deallocated"); + return empty; + } // Length in elements size_t length() const { return _length; } - // Static data memory size in bytes of this set. - static size_t static_mem_size(); // Memory size in bytes taken by this set. 
size_t mem_size(); - static void test() PRODUCT_RETURN; }; #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1CODECACHEREMSET_HPP diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp index 9bd99122e9f3a49b27a9ba91f720c84e58e8ca61..3f18304dbc475faf038264e0394953d8c8bb2299 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp @@ -4580,6 +4580,56 @@ class G1KlassScanClosure : public KlassClosure { } }; +class G1CodeBlobClosure : public CodeBlobClosure { + class HeapRegionGatheringOopClosure : public OopClosure { + G1CollectedHeap* _g1h; + OopClosure* _work; + nmethod* _nm; + + template + void do_oop_work(T* p) { + _work->do_oop(p); + T oop_or_narrowoop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(oop_or_narrowoop)) { + oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop); + HeapRegion* hr = _g1h->heap_region_containing_raw(o); + assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in CS then evacuation failed and nm must already be in the remset"); + hr->add_strong_code_root(_nm); + } + } + + public: + HeapRegionGatheringOopClosure(OopClosure* oc) : _g1h(G1CollectedHeap::heap()), _work(oc), _nm(NULL) {} + + void do_oop(oop* o) { + do_oop_work(o); + } + + void do_oop(narrowOop* o) { + do_oop_work(o); + } + + void set_nm(nmethod* nm) { + _nm = nm; + } + }; + + HeapRegionGatheringOopClosure _oc; +public: + G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {} + + void do_code_blob(CodeBlob* cb) { + nmethod* nm = cb->as_nmethod_or_null(); + if (nm != NULL) { + if (!nm->test_set_oops_do_mark()) { + _oc.set_nm(nm); + nm->oops_do(&_oc); + nm->fix_oop_relocations(); + } + } + } +}; + class G1ParTask : public AbstractGangTask { protected: G1CollectedHeap* _g1h; @@ -4648,22 +4698,6 @@ public: } }; - class G1CodeBlobClosure: public CodeBlobClosure { - OopClosure* _f; - - public: - G1CodeBlobClosure(OopClosure* f) : _f(f) {} - void do_code_blob(CodeBlob* blob) { - nmethod* that = blob->as_nmethod_or_null(); - if (that != NULL) { - if (!that->test_set_oops_do_mark()) { - that->oops_do(_f); - that->fix_oop_relocations(); - } - } - } - }; - void work(uint worker_id) { if (worker_id >= _n_workers) return; // no work needed this round @@ -4854,7 +4888,7 @@ g1_process_roots(OopClosure* scan_non_heap_roots, g1_policy()->phase_times()->record_satb_filtering_time(worker_i, satb_filtering_ms); // Now scan the complement of the collection set. - MarkingCodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots, CodeBlobToOopClosure::FixRelocations); + G1CodeBlobClosure scavenge_cs_nmethods(scan_non_heap_weak_roots); g1_rem_set()->oops_into_collection_set_do(scan_rs, &scavenge_cs_nmethods, worker_i); @@ -5901,12 +5935,6 @@ void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) { hot_card_cache->reset_hot_cache(); hot_card_cache->set_use_cache(true); - // Migrate the strong code roots attached to each region in - // the collection set. Ideally we would like to do this - // after we have finished the scanning/evacuation of the - // strong code roots for a particular heap region. 
- migrate_strong_code_roots(); - purge_code_root_memory(); if (g1_policy()->during_initial_mark_pause()) { @@ -6902,13 +6930,8 @@ class RegisterNMethodOopClosure: public OopClosure { " starting at "HR_FORMAT, _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); - // HeapRegion::add_strong_code_root() avoids adding duplicate - // entries but having duplicates is OK since we "mark" nmethods - // as visited when we scan the strong code root lists during the GC. - hr->add_strong_code_root(_nm); - assert(hr->rem_set()->strong_code_roots_list_contains(_nm), - err_msg("failed to add code root "PTR_FORMAT" to remembered set of region "HR_FORMAT, - _nm, HR_FORMAT_PARAMS(hr))); + // HeapRegion::add_strong_code_root_locked() avoids adding duplicate entries. + hr->add_strong_code_root_locked(_nm); } } @@ -6935,9 +6958,6 @@ class UnregisterNMethodOopClosure: public OopClosure { _nm, HR_FORMAT_PARAMS(hr), HR_FORMAT_PARAMS(hr->humongous_start_region()))); hr->remove_strong_code_root(_nm); - assert(!hr->rem_set()->strong_code_roots_list_contains(_nm), - err_msg("failed to remove code root "PTR_FORMAT" of region "HR_FORMAT, - _nm, HR_FORMAT_PARAMS(hr))); } } @@ -6965,28 +6985,9 @@ void G1CollectedHeap::unregister_nmethod(nmethod* nm) { nm->oops_do(®_cl, true); } -class MigrateCodeRootsHeapRegionClosure: public HeapRegionClosure { -public: - bool doHeapRegion(HeapRegion *hr) { - assert(!hr->isHumongous(), - err_msg("humongous region "HR_FORMAT" should not have been added to collection set", - HR_FORMAT_PARAMS(hr))); - hr->migrate_strong_code_roots(); - return false; - } -}; - -void G1CollectedHeap::migrate_strong_code_roots() { - MigrateCodeRootsHeapRegionClosure cl; - double migrate_start = os::elapsedTime(); - collection_set_iterate(&cl); - double migration_time_ms = (os::elapsedTime() - migrate_start) * 1000.0; - g1_policy()->phase_times()->record_strong_code_root_migration_time(migration_time_ms); -} - void G1CollectedHeap::purge_code_root_memory() { double purge_start = os::elapsedTime(); - G1CodeRootSet::purge_chunks(G1CodeRootsChunkCacheKeepPercent); + G1CodeRootSet::purge(); double purge_time_ms = (os::elapsedTime() - purge_start) * 1000.0; g1_policy()->phase_times()->record_strong_code_root_purge_time(purge_time_ms); } diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp index 2ca629e6007ddd9a15c51313375076e9456d59f4..b1110da9ecf2edaeb65bbb6b4bf9af3dd471b49a 100644 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp @@ -1,4 +1,4 @@ -/* + /* * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -1633,12 +1633,6 @@ public: // Unregister the given nmethod from the G1 heap virtual void unregister_nmethod(nmethod* nm); - // Migrate the nmethods in the code root lists of the regions - // in the collection set to regions in to-space. In the event - // of an evacuation failure, nmethods that reference objects - // that were not successfullly evacuated are not migrated. - void migrate_strong_code_roots(); - // Free up superfluous code root memory. 
void purge_code_root_memory(); diff --git a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp index 4b25d904063b88425777b2d7fcac2fba86f3f82a..72d1ca179f89d7493330f7d0fbe533a83061ec4d 100644 --- a/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp +++ b/src/share/vm/gc_implementation/g1/g1EvacFailure.hpp @@ -217,6 +217,8 @@ public: _update_rset_cl->set_region(hr); hr->object_iterate(&rspc); + hr->rem_set()->clean_strong_code_roots(hr); + hr->note_self_forwarding_removal_end(during_initial_mark, during_conc_mark, rspc.marked_bytes()); diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp index b18e6daf34637946bb98274b93ba559d529407e6..8e5b29e9ca100f49d2765f21c00f24f24ad96016 100644 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp @@ -274,9 +274,6 @@ double G1GCPhaseTimes::accounted_time_ms() { // Now subtract the time taken to fix up roots in generated code misc_time_ms += _cur_collection_code_root_fixup_time_ms; - // Strong code root migration time - misc_time_ms += _cur_strong_code_root_migration_time_ms; - // Strong code root purge time misc_time_ms += _cur_strong_code_root_purge_time_ms; @@ -327,7 +324,6 @@ void G1GCPhaseTimes::print(double pause_time_sec) { _last_obj_copy_times_ms.print(1, "Object Copy (ms)"); } print_stats(1, "Code Root Fixup", _cur_collection_code_root_fixup_time_ms); - print_stats(1, "Code Root Migration", _cur_strong_code_root_migration_time_ms); print_stats(1, "Code Root Purge", _cur_strong_code_root_purge_time_ms); if (G1StringDedup::is_enabled()) { print_stats(1, "String Dedup Fixup", _cur_string_dedup_fixup_time_ms, _active_gc_threads); diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp index 4237c972a557f78e54786231fba374c70b9e3873..8421eb07b6ab3784aa2931e742a3e51dc9e7b207 100644 --- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp +++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp @@ -129,7 +129,6 @@ class G1GCPhaseTimes : public CHeapObj { double _cur_collection_par_time_ms; double _cur_collection_code_root_fixup_time_ms; - double _cur_strong_code_root_migration_time_ms; double _cur_strong_code_root_purge_time_ms; double _cur_evac_fail_recalc_used; @@ -233,10 +232,6 @@ class G1GCPhaseTimes : public CHeapObj { _cur_collection_code_root_fixup_time_ms = ms; } - void record_strong_code_root_migration_time(double ms) { - _cur_strong_code_root_migration_time_ms = ms; - } - void record_strong_code_root_purge_time(double ms) { _cur_strong_code_root_purge_time_ms = ms; } diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.cpp b/src/share/vm/gc_implementation/g1/g1RemSet.cpp index db406ce1747e34abb9554e87858ff3b3e8f7c69a..42376213c609e11998ca91711c5e344752aed79a 100644 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp @@ -109,7 +109,7 @@ class ScanRSClosure : public HeapRegionClosure { G1CollectedHeap* _g1h; OopsInHeapRegionClosure* _oc; - CodeBlobToOopClosure* _code_root_cl; + CodeBlobClosure* _code_root_cl; G1BlockOffsetSharedArray* _bot_shared; G1SATBCardTableModRefBS *_ct_bs; @@ -121,7 +121,7 @@ class ScanRSClosure : public HeapRegionClosure { public: ScanRSClosure(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i) : _oc(oc), _code_root_cl(code_root_cl), @@ -241,7 +241,7 
@@ public: }; void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i) { double rs_time_start = os::elapsedTime(); HeapRegion *startRegion = _g1->start_cset_region_for_worker(worker_i); @@ -320,7 +320,7 @@ void G1RemSet::cleanupHRRS() { } void G1RemSet::oops_into_collection_set_do(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i) { #if CARD_REPEAT_HISTO ct_freq_update_histo_and_reset(); diff --git a/src/share/vm/gc_implementation/g1/g1RemSet.hpp b/src/share/vm/gc_implementation/g1/g1RemSet.hpp index 81e855935095bebae97832e2056f5d274d1d2110..35279a52e201f6e5977b3c7a378c309c37149edb 100644 --- a/src/share/vm/gc_implementation/g1/g1RemSet.hpp +++ b/src/share/vm/gc_implementation/g1/g1RemSet.hpp @@ -96,7 +96,7 @@ public: // the "i" passed to the calling thread's work(i) function. // In the sequential case this param will be ignored. void oops_into_collection_set_do(OopsInHeapRegionClosure* blk, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i); // Prepare for and cleanup after an oops_into_collection_set_do @@ -108,7 +108,7 @@ public: void cleanup_after_oops_into_collection_set_do(); void scanRS(OopsInHeapRegionClosure* oc, - CodeBlobToOopClosure* code_root_cl, + CodeBlobClosure* code_root_cl, uint worker_i); void updateRS(DirtyCardQueue* into_cset_dcq, uint worker_i); diff --git a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp index 062d7abe2c58dc46f37365743dab18a6fb07e5eb..c55165bdc7bdd579c1759b1442e7984a548e15f6 100644 --- a/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp +++ b/src/share/vm/gc_implementation/g1/g1RemSetSummary.cpp @@ -253,6 +253,7 @@ public: size_t occupied_cards = hrrs->occupied(); size_t code_root_mem_sz = hrrs->strong_code_roots_mem_size(); if (code_root_mem_sz > max_code_root_mem_sz()) { + _max_code_root_mem_sz = code_root_mem_sz; _max_code_root_mem_sz_region = r; } size_t code_root_elems = hrrs->strong_code_roots_list_length(); diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp index a14d7f057d79447e7a9c5266e1e16ec80fb56214..8a3ea9d75687cb1aa59cb02e910758e441c89a8c 100644 --- a/src/share/vm/gc_implementation/g1/g1_globals.hpp +++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp @@ -285,10 +285,6 @@ product(uintx, G1MixedGCCountTarget, 8, \ "The target number of mixed GCs after a marking cycle.") \ \ - experimental(uintx, G1CodeRootsChunkCacheKeepPercent, 10, \ - "The amount of code root chunks that should be kept at most " \ - "as percentage of already allocated.") \ - \ experimental(bool, G1ReclaimDeadHumongousObjectsAtYoungGC, true, \ "Try to reclaim dead large objects at every young GC.") \ \ diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp index a98d687309e269e544c2476dc834a124e2645d60..d5880db4e414a94d7939a74f6b133a1afd00d6fe 100644 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp @@ -549,19 +549,15 @@ void HeapRegion::add_strong_code_root(nmethod* nm) { hrrs->add_strong_code_root(nm); } -void HeapRegion::remove_strong_code_root(nmethod* nm) { +void HeapRegion::add_strong_code_root_locked(nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); HeapRegionRemSet* hrrs = rem_set(); - hrrs->remove_strong_code_root(nm); + 
hrrs->add_strong_code_root_locked(nm); } -void HeapRegion::migrate_strong_code_roots() { - assert(in_collection_set(), "only collection set regions"); - assert(!isHumongous(), - err_msg("humongous region "HR_FORMAT" should not have been added to collection set", - HR_FORMAT_PARAMS(this))); - +void HeapRegion::remove_strong_code_root(nmethod* nm) { HeapRegionRemSet* hrrs = rem_set(); - hrrs->migrate_strong_code_roots(); + hrrs->remove_strong_code_root(nm); } void HeapRegion::strong_code_roots_do(CodeBlobClosure* blk) const { diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp index 8f19eb0b5be0d71189c7b977a09969c85a801a30..41a8adea2e3050bf5a22dc081ef512c1080c0a3a 100644 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp @@ -772,14 +772,9 @@ class HeapRegion: public G1OffsetTableContigSpace { // Routines for managing a list of code roots (attached to the // this region's RSet) that point into this heap region. void add_strong_code_root(nmethod* nm); + void add_strong_code_root_locked(nmethod* nm); void remove_strong_code_root(nmethod* nm); - // During a collection, migrate the successfully evacuated - // strong code roots that referenced into this region to the - // new regions that they now point into. Unsuccessfully - // evacuated code roots are not migrated. - void migrate_strong_code_roots(); - // Applies blk->do_code_blob() to each of the entries in // the strong code roots list for this region void strong_code_roots_do(CodeBlobClosure* blk) const; diff --git a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp index b8adf1e35eb71809d1be63f2b0f10fbe68cc9a01..ec0249ea468dcdd6f7e28894f682b8a06f231df7 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp @@ -923,8 +923,24 @@ void HeapRegionRemSet::scrub(CardTableModRefBS* ctbs, } // Code roots support +// +// The code root set is protected by two separate locking schemes +// When at safepoint the per-hrrs lock must be held during modifications +// except when doing a full gc. +// When not at safepoint the CodeCache_lock must be held during modifications. +// When concurrent readers access the contains() function +// (during the evacuation phase) no removals are allowed. void HeapRegionRemSet::add_strong_code_root(nmethod* nm) { + assert(nm != NULL, "sanity"); + // Optimistic unlocked contains-check + if (!_code_roots.contains(nm)) { + MutexLockerEx ml(&_m, Mutex::_no_safepoint_check_flag); + add_strong_code_root_locked(nm); + } +} + +void HeapRegionRemSet::add_strong_code_root_locked(nmethod* nm) { assert(nm != NULL, "sanity"); _code_roots.add(nm); } @@ -933,98 +949,21 @@ void HeapRegionRemSet::remove_strong_code_root(nmethod* nm) { assert(nm != NULL, "sanity"); assert_locked_or_safepoint(CodeCache_lock); - _code_roots.remove_lock_free(nm); + MutexLockerEx ml(CodeCache_lock->owned_by_self() ? 
NULL : &_m, Mutex::_no_safepoint_check_flag); + _code_roots.remove(nm); // Check that there were no duplicates guarantee(!_code_roots.contains(nm), "duplicate entry found"); } -class NMethodMigrationOopClosure : public OopClosure { - G1CollectedHeap* _g1h; - HeapRegion* _from; - nmethod* _nm; - - uint _num_self_forwarded; - - template void do_oop_work(T* p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop obj = oopDesc::decode_heap_oop_not_null(heap_oop); - if (_from->is_in(obj)) { - // Reference still points into the source region. - // Since roots are immediately evacuated this means that - // we must have self forwarded the object - assert(obj->is_forwarded(), - err_msg("code roots should be immediately evacuated. " - "Ref: "PTR_FORMAT", " - "Obj: "PTR_FORMAT", " - "Region: "HR_FORMAT, - p, (void*) obj, HR_FORMAT_PARAMS(_from))); - assert(obj->forwardee() == obj, - err_msg("not self forwarded? obj = "PTR_FORMAT, (void*)obj)); - - // The object has been self forwarded. - // Note, if we're during an initial mark pause, there is - // no need to explicitly mark object. It will be marked - // during the regular evacuation failure handling code. - _num_self_forwarded++; - } else { - // The reference points into a promotion or to-space region - HeapRegion* to = _g1h->heap_region_containing(obj); - to->rem_set()->add_strong_code_root(_nm); - } - } - } - -public: - NMethodMigrationOopClosure(G1CollectedHeap* g1h, HeapRegion* from, nmethod* nm): - _g1h(g1h), _from(from), _nm(nm), _num_self_forwarded(0) {} - - void do_oop(narrowOop* p) { do_oop_work(p); } - void do_oop(oop* p) { do_oop_work(p); } - - uint retain() { return _num_self_forwarded > 0; } -}; - -void HeapRegionRemSet::migrate_strong_code_roots() { - assert(hr()->in_collection_set(), "only collection set regions"); - assert(!hr()->isHumongous(), - err_msg("humongous region "HR_FORMAT" should not have been added to the collection set", - HR_FORMAT_PARAMS(hr()))); - - ResourceMark rm; - - // List of code blobs to retain for this region - GrowableArray to_be_retained(10); - G1CollectedHeap* g1h = G1CollectedHeap::heap(); - - while (!_code_roots.is_empty()) { - nmethod *nm = _code_roots.pop(); - if (nm != NULL) { - NMethodMigrationOopClosure oop_cl(g1h, hr(), nm); - nm->oops_do(&oop_cl); - if (oop_cl.retain()) { - to_be_retained.push(nm); - } - } - } - - // Now push any code roots we need to retain - assert(to_be_retained.is_empty() || hr()->evacuation_failed(), - "Retained nmethod list must be empty or " - "evacuation of this region failed"); - - while (to_be_retained.is_nonempty()) { - nmethod* nm = to_be_retained.pop(); - assert(nm != NULL, "sanity"); - add_strong_code_root(nm); - } -} - void HeapRegionRemSet::strong_code_roots_do(CodeBlobClosure* blk) const { _code_roots.nmethods_do(blk); } +void HeapRegionRemSet::clean_strong_code_roots(HeapRegion* hr) { + _code_roots.clean(hr); +} + size_t HeapRegionRemSet::strong_code_roots_mem_size() { return _code_roots.mem_size(); } diff --git a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp index 48b53d08cae7db3f224119e1e68247e65988ddc4..2d5c71b227a9636dd8447cf89a65c08b05c24a8b 100644 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp @@ -353,13 +353,13 @@ public: // Returns the memory occupancy of all static data structures associated // with remembered sets. 
static size_t static_mem_size() { - return OtherRegionsTable::static_mem_size() + G1CodeRootSet::free_chunks_static_mem_size(); + return OtherRegionsTable::static_mem_size() + G1CodeRootSet::static_mem_size(); } // Returns the memory occupancy of all free_list data structures associated // with remembered sets. static size_t fl_mem_size() { - return OtherRegionsTable::fl_mem_size() + G1CodeRootSet::free_chunks_mem_size(); + return OtherRegionsTable::fl_mem_size(); } bool contains_reference(OopOrNarrowOopStar from) const { @@ -369,18 +369,15 @@ public: // Routines for managing the list of code roots that point into // the heap region that owns this RSet. void add_strong_code_root(nmethod* nm); + void add_strong_code_root_locked(nmethod* nm); void remove_strong_code_root(nmethod* nm); - // During a collection, migrate the successfully evacuated strong - // code roots that referenced into the region that owns this RSet - // to the RSets of the new regions that they now point into. - // Unsuccessfully evacuated code roots are not migrated. - void migrate_strong_code_roots(); - // Applies blk->do_code_blob() to each of the entries in // the strong code roots list void strong_code_roots_do(CodeBlobClosure* blk) const; + void clean_strong_code_roots(HeapRegion* hr); + // Returns the number of elements in the strong code roots list size_t strong_code_roots_list_length() const { return _code_roots.length(); diff --git a/src/share/vm/memory/freeList.cpp b/src/share/vm/memory/freeList.cpp index f1d4859a04004fd9f9b7b3c7d23e1b237ed47141..3ab6f09a8ae36d162106c4cec7f5b25e9635b245 100644 --- a/src/share/vm/memory/freeList.cpp +++ b/src/share/vm/memory/freeList.cpp @@ -34,7 +34,6 @@ #if INCLUDE_ALL_GCS #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp" -#include "gc_implementation/g1/g1CodeCacheRemSet.hpp" #endif // INCLUDE_ALL_GCS // Free list. A FreeList is used to access a linked list of chunks @@ -333,5 +332,4 @@ template class FreeList; template class FreeList; #if INCLUDE_ALL_GCS template class FreeList; -template class FreeList; #endif // INCLUDE_ALL_GCS diff --git a/src/share/vm/oops/method.cpp b/src/share/vm/oops/method.cpp index a544908641c50de6d4b03c5874e8b0da14ad8402..d67685a9888ec5f53b3c668c6eb3591cd677f7f8 100644 --- a/src/share/vm/oops/method.cpp +++ b/src/share/vm/oops/method.cpp @@ -93,7 +93,7 @@ Method::Method(ConstMethod* xconst, AccessFlags access_flags, int size) { set_hidden(false); set_dont_inline(false); set_method_data(NULL); - set_method_counters(NULL); + clear_method_counters(); set_vtable_index(Method::garbage_vtable_index); // Fix and bury in Method* @@ -117,7 +117,7 @@ void Method::deallocate_contents(ClassLoaderData* loader_data) { MetadataFactory::free_metadata(loader_data, method_data()); set_method_data(NULL); MetadataFactory::free_metadata(loader_data, method_counters()); - set_method_counters(NULL); + clear_method_counters(); // The nmethod will be gone when we get here. 
if (code() != NULL) _code = NULL; } @@ -388,9 +388,7 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) { methodHandle mh(m); ClassLoaderData* loader_data = mh->method_holder()->class_loader_data(); MethodCounters* counters = MethodCounters::allocate(loader_data, CHECK_NULL); - if (mh->method_counters() == NULL) { - mh->set_method_counters(counters); - } else { + if (!mh->init_method_counters(counters)) { MetadataFactory::free_metadata(loader_data, counters); } return mh->method_counters(); @@ -852,7 +850,7 @@ void Method::unlink_method() { assert(!DumpSharedSpaces || _method_data == NULL, "unexpected method data?"); set_method_data(NULL); - set_method_counters(NULL); + clear_method_counters(); } // Called when the method_holder is getting linked. Setup entrypoints so the method diff --git a/src/share/vm/oops/method.hpp b/src/share/vm/oops/method.hpp index 7e2db7f510e690f17769af819805679c8542e06f..8d92437e2911a448eb2ea5cb424b2c6ee858cae9 100644 --- a/src/share/vm/oops/method.hpp +++ b/src/share/vm/oops/method.hpp @@ -365,11 +365,13 @@ class Method : public Metadata { return _method_counters; } - void set_method_counters(MethodCounters* counters) { - // The store into method must be released. On platforms without - // total store order (TSO) the reference may become visible before - // the initialization of data otherwise. - OrderAccess::release_store_ptr((volatile void *)&_method_counters, counters); + void clear_method_counters() { + _method_counters = NULL; + } + + bool init_method_counters(MethodCounters* counters) { + // Try to install a pointer to MethodCounters, return true on success. + return Atomic::cmpxchg_ptr(counters, (volatile void*)&_method_counters, NULL) == NULL; } #ifdef TIERED diff --git a/src/share/vm/utilities/hashtable.cpp b/src/share/vm/utilities/hashtable.cpp index 40fb3b1537d8ac9a2236122436e1b114b66bf6f2..a60fe7666e13361ed38101d1ecbf02044bc9074b 100644 --- a/src/share/vm/utilities/hashtable.cpp +++ b/src/share/vm/utilities/hashtable.cpp @@ -36,21 +36,22 @@ #include "utilities/numberSeq.hpp" -// This is a generic hashtable, designed to be used for the symbol -// and string tables. -// -// It is implemented as an open hash table with a fixed number of buckets. -// -// %note: -// - HashtableEntrys are allocated in blocks to reduce the space overhead. - -template BasicHashtableEntry* BasicHashtable::new_entry(unsigned int hashValue) { - BasicHashtableEntry* entry; +// This hashtable is implemented as an open hash table with a fixed number of buckets. - if (_free_list) { +template BasicHashtableEntry* BasicHashtable::new_entry_free_list() { + BasicHashtableEntry* entry = NULL; + if (_free_list != NULL) { entry = _free_list; _free_list = _free_list->next(); - } else { + } + return entry; +} + +// HashtableEntrys are allocated in blocks to reduce the space overhead. +template BasicHashtableEntry* BasicHashtable::new_entry(unsigned int hashValue) { + BasicHashtableEntry* entry = new_entry_free_list(); + + if (entry == NULL) { if (_first_free_entry + _entry_size >= _end_block) { int block_size = MIN2(512, MAX2((int)_table_size / 2, (int)_number_of_entries)); int len = _entry_size * block_size; @@ -83,9 +84,9 @@ template HashtableEntry* Hashtable::new_entry( // This is somewhat an arbitrary heuristic but if one bucket gets to // rehash_count which is currently 100, there's probably something wrong. 
-template bool BasicHashtable::check_rehash_table(int count) { - assert(table_size() != 0, "underflow"); - if (count > (((double)number_of_entries()/(double)table_size())*rehash_multiple)) { +template bool RehashableHashtable::check_rehash_table(int count) { + assert(this->table_size() != 0, "underflow"); + if (count > (((double)this->number_of_entries()/(double)this->table_size())*rehash_multiple)) { // Set a flag for the next safepoint, which should be at some guaranteed // safepoint interval. return true; @@ -93,13 +94,13 @@ template bool BasicHashtable::check_rehash_table(int count) { return false; } -template juint Hashtable::_seed = 0; +template juint RehashableHashtable::_seed = 0; // Create a new table and using alternate hash code, populate the new table // with the existing elements. This can be used to change the hash code // and could in the future change the size of the table. -template void Hashtable::move_to(Hashtable* new_table) { +template void RehashableHashtable::move_to(RehashableHashtable* new_table) { // Initialize the global seed for hashing. _seed = AltHashing::compute_seed(); @@ -109,7 +110,7 @@ template void Hashtable::move_to(Hashtable* ne // Iterate through the table and create a new entry for the new table for (int i = 0; i < new_table->table_size(); ++i) { - for (HashtableEntry* p = bucket(i); p != NULL; ) { + for (HashtableEntry* p = this->bucket(i); p != NULL; ) { HashtableEntry* next = p->next(); T string = p->literal(); // Use alternate hashing algorithm on the symbol in the first table @@ -238,11 +239,11 @@ template void Hashtable::reverse(void* boundary) { } } -template int Hashtable::literal_size(Symbol *symbol) { +template int RehashableHashtable::literal_size(Symbol *symbol) { return symbol->size() * HeapWordSize; } -template int Hashtable::literal_size(oop oop) { +template int RehashableHashtable::literal_size(oop oop) { // NOTE: this would over-count if (pre-JDK8) java_lang_Class::has_offset_field() is true, // and the String.value array is shared by several Strings. However, starting from JDK8, // the String.value array is not shared anymore. 
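For illustration only (this is not part of the patch, and all names below are made up), a minimal standalone C++ sketch of the allocation order that the new BasicHashtable::new_entry_free_list() helper factors out of new_entry(): reuse an entry from the free list when one is available, otherwise carve a new entry out of the current block. The EntryPool/Entry types, the fixed-size block, and the nullptr-on-exhaustion behavior are simplifications; the real table allocates a fresh block instead of failing.

#include <cstddef>
#include <vector>

struct Entry {
  Entry*   next;
  unsigned hash;
};

class EntryPool {
  Entry* _free_list = nullptr;   // entries recycled by free_entry()
  std::vector<Entry> _block;     // current allocation block
  std::size_t _first_free = 0;   // bump pointer into _block

public:
  explicit EntryPool(std::size_t block_size) : _block(block_size) {}

  // Analogous to new_entry_free_list(): pop from the free list if it is
  // non-empty, otherwise return nullptr to signal "nothing to reuse".
  Entry* new_entry_free_list() {
    Entry* entry = nullptr;
    if (_free_list != nullptr) {
      entry = _free_list;
      _free_list = _free_list->next;
    }
    return entry;
  }

  // Analogous to the reworked new_entry(): try the free list first and
  // fall back to bump allocation from the block only on a miss.
  Entry* new_entry(unsigned hash) {
    Entry* entry = new_entry_free_list();
    if (entry == nullptr) {
      if (_first_free >= _block.size()) {
        return nullptr;          // simplification: the real code allocates a new block here
      }
      entry = &_block[_first_free++];
    }
    entry->hash = hash;
    entry->next = nullptr;
    return entry;
  }

  // Returning an entry pushes it onto the free list for later reuse.
  void free_entry(Entry* entry) {
    entry->next = _free_list;
    _free_list = entry;
  }
};

int main() {
  EntryPool pool(4);
  Entry* a = pool.new_entry(17);   // comes from the block
  pool.free_entry(a);
  Entry* b = pool.new_entry(42);   // reuses the freed entry
  return (a == b) ? 0 : 1;
}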
@@ -255,12 +256,12 @@ template int Hashtable::literal_size(oop oop) { // Note: if you create a new subclass of Hashtable, you will need to // add a new function Hashtable::literal_size(MyNewType lit) -template void Hashtable::dump_table(outputStream* st, const char *table_name) { +template void RehashableHashtable::dump_table(outputStream* st, const char *table_name) { NumberSeq summary; int literal_bytes = 0; for (int i = 0; i < this->table_size(); ++i) { int count = 0; - for (HashtableEntry* e = bucket(i); + for (HashtableEntry* e = this->bucket(i); e != NULL; e = e->next()) { count++; literal_bytes += literal_size(e->literal()); @@ -270,7 +271,7 @@ template void Hashtable::dump_table(outputStream* st double num_buckets = summary.num(); double num_entries = summary.sum(); - int bucket_bytes = (int)num_buckets * sizeof(bucket(0)); + int bucket_bytes = (int)num_buckets * sizeof(HashtableBucket); int entry_bytes = (int)num_entries * sizeof(HashtableEntry); int total_bytes = literal_bytes + bucket_bytes + entry_bytes; @@ -352,12 +353,20 @@ template void BasicHashtable::verify_lookup_length(double load) #endif // Explicitly instantiate these types +#if INCLUDE_ALL_GCS +template class Hashtable; +template class HashtableEntry; +template class BasicHashtable; +#endif template class Hashtable; +template class RehashableHashtable; +template class RehashableHashtable; template class Hashtable; template class Hashtable; template class Hashtable; #if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS) template class Hashtable; +template class RehashableHashtable; #endif // SOLARIS || CHECK_UNHANDLED_OOPS template class Hashtable; template class Hashtable; diff --git a/src/share/vm/utilities/hashtable.hpp b/src/share/vm/utilities/hashtable.hpp index aa45100249c35f03c006700078abe4015eee2a07..6244c4c402e578911a14b7e6af4a85a1bf9a499e 100644 --- a/src/share/vm/utilities/hashtable.hpp +++ b/src/share/vm/utilities/hashtable.hpp @@ -178,11 +178,6 @@ protected: void verify_lookup_length(double load); #endif - enum { - rehash_count = 100, - rehash_multiple = 60 - }; - void initialize(int table_size, int entry_size, int number_of_entries); // Accessor @@ -194,12 +189,12 @@ protected: // The following method is not MT-safe and must be done under lock. BasicHashtableEntry** bucket_addr(int i) { return _buckets[i].entry_addr(); } + // Attempt to get an entry from the free list + BasicHashtableEntry* new_entry_free_list(); + // Table entry management BasicHashtableEntry* new_entry(unsigned int hashValue); - // Check that the table is unbalanced - bool check_rehash_table(int count); - // Used when moving the entry to another table // Clean up links, but do not add to free_list void unlink_entry(BasicHashtableEntry* entry) { @@ -277,8 +272,30 @@ protected: return (HashtableEntry**)BasicHashtable::bucket_addr(i); } +}; + +template class RehashableHashtable : public Hashtable { + protected: + + enum { + rehash_count = 100, + rehash_multiple = 60 + }; + + // Check that the table is unbalanced + bool check_rehash_table(int count); + + public: + RehashableHashtable(int table_size, int entry_size) + : Hashtable(table_size, entry_size) { } + + RehashableHashtable(int table_size, int entry_size, + HashtableBucket* buckets, int number_of_entries) + : Hashtable(table_size, entry_size, buckets, number_of_entries) { } + + // Function to move these elements into the new table. 
- void move_to(Hashtable* new_table); + void move_to(RehashableHashtable* new_table); static bool use_alternate_hashcode() { return _seed != 0; } static juint seed() { return _seed; } @@ -292,7 +309,6 @@ protected: static int literal_size(ConstantPool *cp) {Unimplemented(); return 0;} static int literal_size(Klass *k) {Unimplemented(); return 0;} -public: void dump_table(outputStream* st, const char *table_name); private: diff --git a/test/Makefile b/test/Makefile index f81cc8d64df6b5fde19866af38b13ffeeb0e20b8..5364574181c74097be7e39c66ab8dc51b46371c5 100644 --- a/test/Makefile +++ b/test/Makefile @@ -180,8 +180,8 @@ ifdef TESTDIRS JTREG_TESTDIRS = $(TESTDIRS) endif -# Default JTREG to run (win32 script works for everybody) -JTREG = $(JT_HOME)/win32/bin/jtreg +# Default JTREG to run +JTREG = $(JT_HOME)/bin/jtreg # Option to tell jtreg to not run tests marked with "ignore" ifeq ($(PLATFORM), windows) diff --git a/test/compiler/unsafe/UnsafeRaw.java b/test/compiler/unsafe/UnsafeRaw.java new file mode 100644 index 0000000000000000000000000000000000000000..5d172a5f4a9dc547733d8434364f57faf835045e --- /dev/null +++ b/test/compiler/unsafe/UnsafeRaw.java @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test + * @bug 8058744 + * @summary Invalid pattern-matching of address computations in raw unsafe + * @library /testlibrary + * @run main/othervm -Xbatch UnsafeRaw + */ + +import com.oracle.java.testlibrary.Utils; +import java.util.Random; + +public class UnsafeRaw { + public static class Tests { + public static int int_index(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index << 2)); + } + public static int long_index(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index << 2)); + } + public static int int_index_back_ashift(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index >> 2)); + } + public static int int_index_back_lshift(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index >>> 2)); + } + public static int long_index_back_ashift(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index >> 2)); + } + public static int long_index_back_lshift(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index >>> 2)); + } + public static int int_const_12345678_index(sun.misc.Unsafe unsafe, long base) throws Exception { + int idx4 = 0x12345678; + return unsafe.getInt(base + idx4); + } + public static int long_const_1234567890abcdef_index(sun.misc.Unsafe unsafe, long base) throws Exception { + long idx5 = 0x1234567890abcdefL; + return unsafe.getInt(base + idx5); + } + public static int int_index_mul(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index * 4)); + } + public static int long_index_mul(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index * 4)); + } + public static int int_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, int index) throws Exception { + return unsafe.getInt(base + (index * 16)); + } + public static int long_index_mul_scale_16(sun.misc.Unsafe unsafe, long base, long index) throws Exception { + return unsafe.getInt(base + (index * 16)); + } + } + + public static void main(String[] args) throws Exception { + sun.misc.Unsafe unsafe = Utils.getUnsafe(); + final int array_size = 128; + final int element_size = 4; + final int magic = 0x12345678; + + Random rnd = new Random(); + + long array = unsafe.allocateMemory(array_size * element_size); // 128 ints + long addr = array + array_size * element_size / 2; // something in the middle to work with + unsafe.putInt(addr, magic); + for (int j = 0; j < 100000; j++) { + if (Tests.int_index(unsafe, addr, 0) != magic) throw new Exception(); + if (Tests.long_index(unsafe, addr, 0) != magic) throw new Exception(); + if (Tests.int_index_mul(unsafe, addr, 0) != magic) throw new Exception(); + if (Tests.long_index_mul(unsafe, addr, 0) != magic) throw new Exception(); + { + long idx1 = rnd.nextLong(); + long addr1 = addr - (idx1 << 2); + if (Tests.long_index(unsafe, addr1, idx1) != magic) throw new Exception(); + } + { + long idx2 = rnd.nextLong(); + long addr2 = addr - (idx2 >> 2); + if (Tests.long_index_back_ashift(unsafe, addr2, idx2) != magic) throw new Exception(); + } + { + long idx3 = rnd.nextLong(); + long addr3 = addr - (idx3 >>> 2); + if (Tests.long_index_back_lshift(unsafe, addr3, idx3) != magic) throw new Exception(); + } + { + long idx4 = 0x12345678; + long addr4 = addr - idx4; + if (Tests.int_const_12345678_index(unsafe, addr4) != 
magic) throw new Exception(); + } + { + long idx5 = 0x1234567890abcdefL; + long addr5 = addr - idx5; + if (Tests.long_const_1234567890abcdef_index(unsafe, addr5) != magic) throw new Exception(); + } + { + int idx6 = rnd.nextInt(); + long addr6 = addr - (idx6 >> 2); + if (Tests.int_index_back_ashift(unsafe, addr6, idx6) != magic) throw new Exception(); + } + { + int idx7 = rnd.nextInt(); + long addr7 = addr - (idx7 >>> 2); + if (Tests.int_index_back_lshift(unsafe, addr7, idx7) != magic) throw new Exception(); + } + { + int idx8 = rnd.nextInt(); + long addr8 = addr - (idx8 * 16); + if (Tests.int_index_mul_scale_16(unsafe, addr8, idx8) != magic) throw new Exception(); + } + { + long idx9 = rnd.nextLong(); + long addr9 = addr - (idx9 * 16); + if (Tests.long_index_mul_scale_16(unsafe, addr9, idx9) != magic) throw new Exception(); + } + } + } +} diff --git a/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java b/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java new file mode 100644 index 0000000000000000000000000000000000000000..ec184bbcbee27b6521e60758f025fd9fd0d908a0 --- /dev/null +++ b/test/runtime/SharedArchiveFile/ArchiveDoesNotExist.java @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test ArchiveDoesNotExist + * @summary Test how VM handles "file does not exist" situation while + * attempting to use CDS archive. JVM should exit gracefully + * when sharing mode is ON, and continue w/o sharing if sharing + * mode is AUTO. 
+ * @library /testlibrary + * @run main ArchiveDoesNotExist + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class ArchiveDoesNotExist { + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + File cdsFile = new File(fileName); + if (cdsFile.exists()) + throw new RuntimeException("Test error: cds file already exists"); + + // Sharing: on + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:on", + "-version"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldContain("Specified shared archive not found"); + output.shouldHaveExitValue(1); + + // Sharing: auto + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:auto", + "-version"); + + output = new OutputAnalyzer(pb.start()); + output.shouldMatch("(java|openjdk) version"); + output.shouldNotContain("sharing"); + output.shouldHaveExitValue(0); + } +} diff --git a/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java index 0e9bb07762f5c48e7c00a824dff7e99895e0c652..9ecb1dae390ee8f0535e4452c64476a2838b5007 100644 --- a/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java +++ b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,7 @@ * is different from object alignment for creating a CDS file * should fail when loading. * @library /testlibrary + * @bug 8025642 */ import com.oracle.java.testlibrary.*; @@ -82,7 +83,11 @@ public class CdsDifferentObjectAlignment { createAlignment, loadAlignment); - output.shouldContain(expectedErrorMsg); + try { + output.shouldContain(expectedErrorMsg); + } catch (RuntimeException e) { + output.shouldContain("Unable to use shared archive"); + } output.shouldHaveExitValue(1); } } diff --git a/test/runtime/SharedArchiveFile/DefaultUseWithClient.java b/test/runtime/SharedArchiveFile/DefaultUseWithClient.java new file mode 100644 index 0000000000000000000000000000000000000000..52cae81cc4fc3746b2d2ff7cad80cb18e5a6aed2 --- /dev/null +++ b/test/runtime/SharedArchiveFile/DefaultUseWithClient.java @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test DefaultUseWithClient + * @summary Test default behavior of sharing with -client + * @library /testlibrary + * @run main DefaultUseWithClient + * @bug 8032224 + */ + +import com.oracle.java.testlibrary.*; +import java.io.File; + +public class DefaultUseWithClient { + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + // On 32-bit windows CDS should be on by default in "-client" config + // Skip this test on any other platform + boolean is32BitWindows = (Platform.isWindows() && Platform.is32bit()); + if (!is32BitWindows) { + System.out.println("Test only applicable on 32-bit Windows. Skipping"); + return; + } + + // create the archive + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-Xshare:dump"); + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + output.shouldHaveExitValue(0); + + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + "-client", + "-XX:+PrintSharedSpaces", + "-version"); + + output = new OutputAnalyzer(pb.start()); + try { + output.shouldContain("sharing"); + } catch (RuntimeException e) { + // if sharing failed due to ASLR or similar reasons, + // check whether sharing was attempted at all (UseSharedSpaces) + output.shouldContain("UseSharedSpaces:"); + } + output.shouldHaveExitValue(0); + } +} diff --git a/test/runtime/SharedArchiveFile/LimitSharedSizes.java b/test/runtime/SharedArchiveFile/LimitSharedSizes.java new file mode 100644 index 0000000000000000000000000000000000000000..6989a643f59d596eade168ccae55e3ee26987736 --- /dev/null +++ b/test/runtime/SharedArchiveFile/LimitSharedSizes.java @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* @test LimitSharedSizes + * @summary Test handling of limits on shared space size + * @library /testlibrary + * @run main LimitSharedSizes + */ + +import com.oracle.java.testlibrary.*; + +public class LimitSharedSizes { + private static class SharedSizeTestData { + public String optionName; + public String optionValue; + public String expectedErrorMsg; + + public SharedSizeTestData(String name, String value, String msg) { + optionName = name; + optionValue = value; + expectedErrorMsg = msg; + } + } + + private static final SharedSizeTestData[] testTable = { + // values in this part of the test table should cause failure + // (shared space sizes are deliberately too small) + new SharedSizeTestData("-XX:SharedReadOnlySize", "4M", "read only"), + new SharedSizeTestData("-XX:SharedReadWriteSize","4M", "read write"), + + // Known issue, JDK-8038422 (assert() on Windows) + // new SharedSizeTestData("-XX:SharedMiscDataSize", "500k", "miscellaneous data"), + + // This will cause a VM crash; commenting out for now; see bug JDK-8038268 + // @ignore JDK-8038268 + // new SharedSizeTestData("-XX:SharedMiscCodeSize", "20k", "miscellaneous code"), + + // these values are larger than default ones, but should + // be acceptable and not cause failure + new SharedSizeTestData("-XX:SharedReadOnlySize", "20M", null), + new SharedSizeTestData("-XX:SharedReadWriteSize", "20M", null), + new SharedSizeTestData("-XX:SharedMiscDataSize", "20M", null), + new SharedSizeTestData("-XX:SharedMiscCodeSize", "20M", null) + }; + + public static void main(String[] args) throws Exception { + String fileName = "test.jsa"; + + for (SharedSizeTestData td : testTable) { + String option = td.optionName + "=" + td.optionValue; + System.out.println("testing option <" + option + ">"); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./" + fileName, + option, + "-Xshare:dump"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + if (td.expectedErrorMsg != null) { + output.shouldContain("The shared " + td.expectedErrorMsg + + " space is not large enough"); + + output.shouldHaveExitValue(2); + } else { + output.shouldNotContain("space is not large enough"); + output.shouldHaveExitValue(0); + } + } + } +} diff --git a/test/runtime/SharedArchiveFile/SharedBaseAddress.java b/test/runtime/SharedArchiveFile/SharedBaseAddress.java new file mode 100644 index 0000000000000000000000000000000000000000..388fe7d0659683f0ade9e3d23eabef60fddc280b --- /dev/null +++ b/test/runtime/SharedArchiveFile/SharedBaseAddress.java @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 
+ * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +/* + * @test SharedBaseAddress + * @summary Test variety of values for SharedBaseAddress, making sure + * VM handles normal values as well as edge values w/o a crash. + * @library /testlibrary + * @run main SharedBaseAddress + */ + +import com.oracle.java.testlibrary.*; + +public class SharedBaseAddress { + + // shared base address test table + private static final String[] testTable = { + "1g", "8g", "64g","512g", "4t", + "32t", "128t", "0", + "1", "64k", "64M" + }; + + public static void main(String[] args) throws Exception { + // Known issue on Solaris-Sparc + // @ignore JDK-8044600 + if (Platform.isSolaris() && Platform.isSparc()) + return; + + for (String testEntry : testTable) { + System.out.println("sharedBaseAddress = " + testEntry); + + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=test.jsa", + "-XX:SharedBaseAddress=" + testEntry, + "-Xshare:dump"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + + output.shouldContain("Loading classes to share"); + + try { + pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=test.jsa", + "-Xshare:on", + "-version"); + output = new OutputAnalyzer(pb.start()); + output.shouldContain("sharing"); + output.shouldHaveExitValue(0); + } catch (RuntimeException e) { + output.shouldContain("Unable to use shared archive"); + output.shouldHaveExitValue(1); + } + } + } +} diff --git a/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java b/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java new file mode 100644 index 0000000000000000000000000000000000000000..a95979f355c9d1485d8173c55f63ae130be7f479 --- /dev/null +++ b/test/runtime/SharedArchiveFile/SpaceUtilizationCheck.java @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +/* + * @test SpaceUtilizationCheck + * @summary Check if the space utilization for shared spaces is adequate + * @library /testlibrary + * @run main SpaceUtilizationCheck + */ + +import com.oracle.java.testlibrary.*; + +import java.util.regex.Pattern; +import java.util.regex.Matcher; +import java.util.ArrayList; +import java.lang.Integer; + +public class SpaceUtilizationCheck { + // Minimum allowed utilization value (percent) + // The goal is to have this number be 50% for RO and RW regions + // Once that feature is implemented, increase the MIN_UTILIZATION to 50 + private static final int MIN_UTILIZATION = 30; + + // Only RO and RW regions are considered for this check, since they + // currently account for the bulk of the shared space + private static final int NUMBER_OF_CHECKED_SHARED_REGIONS = 2; + + public static void main(String[] args) throws Exception { + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( + "-XX:+UnlockDiagnosticVMOptions", + "-XX:SharedArchiveFile=./test.jsa", + "-Xshare:dump"); + + OutputAnalyzer output = new OutputAnalyzer(pb.start()); + String stdout = output.getStdout(); + ArrayList<String> utilization = findUtilization(stdout); + + if (utilization.size() != NUMBER_OF_CHECKED_SHARED_REGIONS) + throw new RuntimeException("The output format of sharing summary has changed"); + + for (String str : utilization) { + int value = Integer.parseInt(str); + if (value < MIN_UTILIZATION) { + System.out.println(stdout); + throw new RuntimeException("Utilization for one of the regions " + + "is below a threshold of " + MIN_UTILIZATION + "%"); + } + } + } + + public static ArrayList<String> findUtilization(String input) { + ArrayList<String> regions = filterRegionsOfInterest(input.split("\n")); + return filterByPattern(filterByPattern(regions, "bytes \\[.*% used\\]"), "\\d+"); + } + + private static ArrayList<String> filterByPattern(Iterable<String> input, String pattern) { + ArrayList<String> result = new ArrayList<String>(); + for (String str : input) { + Matcher matcher = Pattern.compile(pattern).matcher(str); + if (matcher.find()) { + result.add(matcher.group()); + } + } + return result; + } + + private static ArrayList<String> filterRegionsOfInterest(String[] inputLines) { + ArrayList<String> result = new ArrayList<String>(); + for (String str : inputLines) { + if (str.contains("ro space:") || str.contains("rw space:")) { + result.add(str); + } + } + return result; + } +}
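As a closing illustration (again not part of the patch), a minimal standalone C++ sketch of the install-once idiom that the Method::init_method_counters() change earlier in this changeset relies on: compare-and-swap the field from NULL to the freshly allocated object, and have the losing thread discard its copy. std::atomic stands in for HotSpot's Atomic::cmpxchg_ptr, and the Method/MethodCounters types below are simplified stand-ins, not the real classes.

#include <atomic>
#include <cstdio>

struct MethodCounters {
  int invocation_count = 0;
};

class Method {
  std::atomic<MethodCounters*> _method_counters{nullptr};

public:
  // Returns true if this call installed 'counters'; false if another
  // thread won the race and a MethodCounters object is already installed.
  bool init_method_counters(MethodCounters* counters) {
    MethodCounters* expected = nullptr;
    return _method_counters.compare_exchange_strong(expected, counters);
  }

  MethodCounters* method_counters() const { return _method_counters.load(); }

  void clear_method_counters() { _method_counters.store(nullptr); }
};

int main() {
  Method m;
  MethodCounters* counters = new MethodCounters();
  if (!m.init_method_counters(counters)) {
    delete counters;   // lost the race: discard our copy, as the caller does via free_metadata
  }
  std::printf("counters installed: %s\n", m.method_counters() != nullptr ? "yes" : "no");
  delete m.method_counters();
  return 0;
}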