Commit 0e9ae99a authored by amurillo

Merge

......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=20
- HS_BUILD_NUMBER=16
+ HS_BUILD_NUMBER=17
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -1221,10 +1221,8 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
bool is_obj = (type == T_ARRAY || type == T_OBJECT);
LIR_Opr offset = off.result();
- if (data != dst) {
- __ move(data, dst);
- data = dst;
- }
+ // Because we want a 2-arg form of xchg
+ __ move(data, dst);
assert (!x->is_add() && (type == T_INT || (is_obj LP64_ONLY(&& UseCompressedOops))), "unexpected type");
LIR_Address* addr;
......@@ -1254,7 +1252,7 @@ void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) {
pre_barrier(ptr, LIR_OprFact::illegalOpr /* pre_val */,
true /* do_load */, false /* patch */, NULL);
}
- __ xchg(LIR_OprFact::address(addr), data, dst, tmp);
+ __ xchg(LIR_OprFact::address(addr), dst, dst, tmp);
if (is_obj) {
// Seems to be a precise address
post_barrier(ptr, data);
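
The comment "Because we want a 2-arg form of xchg" is the key to this hunk: a two-operand exchange returns the old memory value in the same register that supplied the new value, so the new value has to be copied into the result register (dst) unconditionally before the exchange, and the exchange then operates on dst. A standalone sketch of that pattern using std::atomic rather than HotSpot's LIR (slot/reg and the function name are illustrative, not from the patch):

    #include <atomic>
    #include <cassert>

    // Two-operand exchange: 'reg' carries the new value in and the old value out,
    // mirroring how dst is loaded from data before __ xchg overwrites it.
    static int two_arg_xchg(std::atomic<int>& slot, int& reg) {
      reg = slot.exchange(reg);   // reg now holds the previous contents of slot
      return reg;
    }

    int main() {
      std::atomic<int> slot(1);
      int reg = 42;               // value to install
      two_arg_xchg(slot, reg);
      assert(reg == 1 && slot.load() == 42);
      return 0;
    }
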
......
......@@ -3653,9 +3653,9 @@ class StubGenerator: public StubCodeGenerator {
const Register len_reg = I4; // cipher length
const Register keylen = I5; // reg for storing expanded key array length
- // save cipher len before save_frame, to return in the end
- __ mov(O4, L0);
__ save_frame(0);
+ // save cipher len to return in the end
+ __ mov(len_reg, L0);
// read expanded key length
__ ldsw(Address(key, arrayOopDesc::length_offset_in_bytes() - arrayOopDesc::base_offset_in_bytes(T_INT)), keylen, 0);
......@@ -3778,9 +3778,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stf(FloatRegisterImpl::D, F60, rvec, 0);
__ stf(FloatRegisterImpl::D, F62, rvec, 8);
- __ restore();
- __ retl();
- __ delayed()->mov(L0, O0);
+ __ mov(L0, I0);
+ __ ret();
+ __ delayed()->restore();
__ align(OptoLoopAlignment);
__ BIND(L_cbcenc192);
......@@ -3869,9 +3869,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stf(FloatRegisterImpl::D, F60, rvec, 0);
__ stf(FloatRegisterImpl::D, F62, rvec, 8);
- __ restore();
- __ retl();
- __ delayed()->mov(L0, O0);
+ __ mov(L0, I0);
+ __ ret();
+ __ delayed()->restore();
__ align(OptoLoopAlignment);
__ BIND(L_cbcenc256);
......@@ -3962,9 +3962,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stf(FloatRegisterImpl::D, F60, rvec, 0);
__ stf(FloatRegisterImpl::D, F62, rvec, 8);
- __ restore();
- __ retl();
- __ delayed()->mov(L0, O0);
+ __ mov(L0, I0);
+ __ ret();
+ __ delayed()->restore();
return start;
}
......@@ -3992,9 +3992,9 @@ class StubGenerator: public StubCodeGenerator {
const Register original_key = I5; // original key array only required during decryption
const Register keylen = L6; // reg for storing expanded key array length
- // save cipher len before save_frame, to return in the end
- __ mov(O4, L0);
__ save_frame(0); //args are read from I* registers since we save the frame in the beginning
+ // save cipher len to return in the end
+ __ mov(len_reg, L7);
// load original key from SunJCE expanded decryption key
// Since we load original key buffer starting first element, 8-byte alignment is guaranteed
......@@ -4568,10 +4568,9 @@ class StubGenerator: public StubCodeGenerator {
// re-init initial vector for next block, 8-byte alignment is guaranteed
__ stx(L0, rvec, 0);
__ stx(L1, rvec, 8);
- __ restore();
- __ mov(L0, O0);
- __ retl();
- __ delayed()->nop();
+ __ mov(L7, I0);
+ __ ret();
+ __ delayed()->restore();
return start;
}
......
......@@ -1083,7 +1083,7 @@ void LIR_OpLabel::emit_code(LIR_Assembler* masm) {
void LIR_OpArrayCopy::emit_code(LIR_Assembler* masm) {
masm->emit_arraycopy(this);
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) {
......@@ -1100,20 +1100,20 @@ void LIR_Op1::emit_code(LIR_Assembler* masm) {
void LIR_OpAllocObj::emit_code(LIR_Assembler* masm) {
masm->emit_alloc_obj(this);
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
void LIR_OpBranch::emit_code(LIR_Assembler* masm) {
masm->emit_opBranch(this);
if (stub()) {
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
}
void LIR_OpConvert::emit_code(LIR_Assembler* masm) {
masm->emit_opConvert(this);
if (stub() != NULL) {
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
}
......@@ -1123,13 +1123,13 @@ void LIR_Op2::emit_code(LIR_Assembler* masm) {
void LIR_OpAllocArray::emit_code(LIR_Assembler* masm) {
masm->emit_alloc_array(this);
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
void LIR_OpTypeCheck::emit_code(LIR_Assembler* masm) {
masm->emit_opTypeCheck(this);
if (stub()) {
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
}
......@@ -1144,7 +1144,7 @@ void LIR_Op3::emit_code(LIR_Assembler* masm) {
void LIR_OpLock::emit_code(LIR_Assembler* masm) {
masm->emit_lock(this);
if (stub()) {
- masm->emit_code_stub(stub());
+ masm->append_code_stub(stub());
}
}
......
......@@ -1127,6 +1127,7 @@ class LIR_Op: public CompilationResourceObj {
virtual void print_instr(outputStream* out) const = 0;
virtual void print_on(outputStream* st) const PRODUCT_RETURN;
+ virtual bool is_patching() { return false; }
virtual LIR_OpCall* as_OpCall() { return NULL; }
virtual LIR_OpJavaCall* as_OpJavaCall() { return NULL; }
virtual LIR_OpLabel* as_OpLabel() { return NULL; }
......@@ -1387,6 +1388,7 @@ class LIR_Op1: public LIR_Op {
return (LIR_MoveKind)_flags;
}
+ virtual bool is_patching() { return _patch != lir_patch_none; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_Op1* as_Op1() { return this; }
virtual const char * name() const PRODUCT_RETURN0;
......@@ -1619,6 +1621,7 @@ public:
int profiled_bci() const { return _profiled_bci; }
bool should_profile() const { return _should_profile; }
+ virtual bool is_patching() { return _info_for_patch != NULL; }
virtual void emit_code(LIR_Assembler* masm);
virtual LIR_OpTypeCheck* as_OpTypeCheck() { return this; }
void print_instr(outputStream* out) const PRODUCT_RETURN;
......
......@@ -58,7 +58,7 @@ void LIR_Assembler::patching_epilog(PatchingStub* patch, LIR_PatchCode patch_cod
_masm->nop();
}
patch->install(_masm, patch_code, obj, info);
- append_patching_stub(patch);
+ append_code_stub(patch);
#ifdef ASSERT
Bytecodes::Code code = info->scope()->method()->java_code_at_bci(info->stack()->bci());
......@@ -131,11 +131,6 @@ LIR_Assembler::~LIR_Assembler() {
}
- void LIR_Assembler::append_patching_stub(PatchingStub* stub) {
- _slow_case_stubs->append(stub);
- }
void LIR_Assembler::check_codespace() {
CodeSection* cs = _masm->code_section();
if (cs->remaining() < (int)(NOT_LP64(1*K)LP64_ONLY(2*K))) {
......@@ -144,7 +139,7 @@ void LIR_Assembler::check_codespace() {
}
- void LIR_Assembler::emit_code_stub(CodeStub* stub) {
+ void LIR_Assembler::append_code_stub(CodeStub* stub) {
_slow_case_stubs->append(stub);
}
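
append_code_stub() only records the stub on the _slow_case_stubs list; the stub's code is generated later by emit_slow_case_stubs(), which is what the new name reflects. A rough standalone sketch of that append-then-emit pattern (simplified classes, not the real LIR_Assembler interface):

    #include <cstdio>
    #include <vector>

    struct CodeStub {
      virtual void emit_code() = 0;          // out-of-line slow path
      virtual ~CodeStub() {}
    };

    struct DivByZeroStub : CodeStub {
      void emit_code() { std::puts("  <slow path: throw ArithmeticException>"); }
    };

    class Assembler {
      std::vector<CodeStub*> _slow_case_stubs;
    public:
      void append_code_stub(CodeStub* stub) {  // deferred: just remember the stub
        _slow_case_stubs.push_back(stub);
      }
      void emit_slow_case_stubs() {            // one batch, after the fast-path code
        for (size_t i = 0; i < _slow_case_stubs.size(); i++) {
          _slow_case_stubs[i]->emit_code();
        }
      }
    };

    int main() {
      Assembler masm;
      DivByZeroStub stub;
      std::puts("fast path: idiv");
      masm.append_code_stub(&stub);
      masm.emit_slow_case_stubs();             // slow paths land after the main code
      return 0;
    }
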
......@@ -435,7 +430,7 @@ void LIR_Assembler::add_debug_info_for_null_check_here(CodeEmitInfo* cinfo) {
void LIR_Assembler::add_debug_info_for_null_check(int pc_offset, CodeEmitInfo* cinfo) {
ImplicitNullCheckStub* stub = new ImplicitNullCheckStub(pc_offset, cinfo);
- emit_code_stub(stub);
+ append_code_stub(stub);
}
void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
......@@ -444,7 +439,7 @@ void LIR_Assembler::add_debug_info_for_div0_here(CodeEmitInfo* info) {
void LIR_Assembler::add_debug_info_for_div0(int pc_offset, CodeEmitInfo* cinfo) {
DivByZeroStub* stub = new DivByZeroStub(pc_offset, cinfo);
- emit_code_stub(stub);
+ append_code_stub(stub);
}
void LIR_Assembler::emit_rtcall(LIR_OpRTCall* op) {
......
......@@ -143,7 +143,7 @@ class LIR_Assembler: public CompilationResourceObj {
// stubs
void emit_slow_case_stubs();
void emit_static_call_stub();
- void emit_code_stub(CodeStub* op);
+ void append_code_stub(CodeStub* op);
void add_call_info_here(CodeEmitInfo* info) { add_call_info(code_offset(), info); }
// code patterns
......
......@@ -2382,16 +2382,6 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
int arg_count = frame_map()->oop_map_arg_count();
OopMap* map = new OopMap(frame_size, arg_count);
- // Check if this is a patch site.
- bool is_patch_info = false;
- if (op->code() == lir_move) {
- assert(!is_call_site, "move must not be a call site");
- assert(op->as_Op1() != NULL, "move must be LIR_Op1");
- LIR_Op1* move = (LIR_Op1*)op;
- is_patch_info = move->patch_code() != lir_patch_none;
- }
// Iterate through active intervals
for (Interval* interval = iw->active_first(fixedKind); interval != Interval::end(); interval = interval->next()) {
int assigned_reg = interval->assigned_reg();
......@@ -2406,7 +2396,7 @@ OopMap* LinearScan::compute_oop_map(IntervalWalker* iw, LIR_Op* op, CodeEmitInfo
// moves, any intervals which end at this instruction are included
// in the oop map since we may safepoint while doing the patch
// before we've consumed the inputs.
- if (is_patch_info || op->id() < interval->current_to()) {
+ if (op->is_patching() || op->id() < interval->current_to()) {
// caller-save registers must not be included into oop-maps at calls
assert(!is_call_site || assigned_reg >= nof_regs || !is_caller_save(assigned_reg), "interval is in a caller-save register at a call -> register will be overwritten");
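
With the is_patching() virtuals added above, compute_oop_map() no longer needs to know that only a lir_move could be a patch site; patching type checks are covered by the same test. A toy illustration of replacing the ad-hoc type check with a virtual call (class names invented for illustration, not the real LIR hierarchy):

    #include <cstdio>

    struct Op {
      virtual bool is_patching() { return false; }   // default for most ops
      virtual ~Op() {}
    };

    struct MoveOp : Op {
      bool patched;
      explicit MoveOp(bool p) : patched(p) {}
      bool is_patching() { return patched; }
    };

    struct TypeCheckOp : Op {
      bool has_patch_info;
      explicit TypeCheckOp(bool p) : has_patch_info(p) {}
      bool is_patching() { return has_patch_info; }
    };

    // The caller no longer needs to know which concrete ops can be patch sites.
    static void describe(Op* op) {
      std::printf("patch site: %s\n", op->is_patching() ? "yes" : "no");
    }

    int main() {
      MoveOp m(true);
      TypeCheckOp t(false);
      describe(&m);
      describe(&t);
      return 0;
    }
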
......
......@@ -819,7 +819,7 @@ void ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurren
// false before we start remark. At this point we should also be
// in a STW phase.
assert(!concurrent_marking_in_progress(), "invariant");
- assert(_finger == _heap_end,
+ assert(out_of_regions(),
err_msg("only way to get here: _finger: "PTR_FORMAT", _heap_end: "PTR_FORMAT,
p2i(_finger), p2i(_heap_end)));
update_g1_committed(true);
......@@ -978,7 +978,9 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
if (concurrent()) {
ConcurrentGCThread::stsLeave();
}
- _first_overflow_barrier_sync.enter();
+ bool barrier_aborted = !_first_overflow_barrier_sync.enter();
if (concurrent()) {
ConcurrentGCThread::stsJoin();
}
......@@ -986,7 +988,17 @@ void ConcurrentMark::enter_first_sync_barrier(uint worker_id) {
// more work
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
if (barrier_aborted) {
gclog_or_tty->print_cr("[%u] aborted first barrier", worker_id);
} else {
gclog_or_tty->print_cr("[%u] leaving first barrier", worker_id);
}
}
if (barrier_aborted) {
// If the barrier aborted we ignore the overflow condition and
// just abort the whole marking phase as quickly as possible.
return;
}
// If we're executing the concurrent phase of marking, reset the marking
......@@ -1026,14 +1038,20 @@ void ConcurrentMark::enter_second_sync_barrier(uint worker_id) {
if (concurrent()) {
ConcurrentGCThread::stsLeave();
}
- _second_overflow_barrier_sync.enter();
+ bool barrier_aborted = !_second_overflow_barrier_sync.enter();
if (concurrent()) {
ConcurrentGCThread::stsJoin();
}
// at this point everything should be re-initialized and ready to go
if (verbose_low()) {
gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
if (barrier_aborted) {
gclog_or_tty->print_cr("[%u] aborted second barrier", worker_id);
} else {
gclog_or_tty->print_cr("[%u] leaving second barrier", worker_id);
}
}
}
......@@ -3232,6 +3250,8 @@ void ConcurrentMark::abort() {
for (uint i = 0; i < _max_worker_id; ++i) {
_tasks[i]->clear_region_fields();
}
+ _first_overflow_barrier_sync.abort();
+ _second_overflow_barrier_sync.abort();
_has_aborted = true;
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
......
......@@ -542,8 +542,12 @@ protected:
// frequently.
HeapRegion* claim_region(uint worker_id);
- // It determines whether we've run out of regions to scan.
- bool out_of_regions() { return _finger == _heap_end; }
+ // It determines whether we've run out of regions to scan. Note that
+ // the finger can point past the heap end in case the heap was expanded
+ // to satisfy an allocation without doing a GC. This is fine, because all
+ // objects in those regions will be considered live anyway because of
+ // SATB guarantees (i.e. their TAMS will be equal to bottom).
+ bool out_of_regions() { return _finger >= _heap_end; }
// Returns the task with the given id
CMTask* task(int id) {
......
......@@ -89,6 +89,10 @@ void ConcurrentMarkThread::run() {
while (!_should_terminate) {
// wait until started is set.
sleepBeforeNextCycle();
+ if (_should_terminate) {
+ break;
+ }
{
ResourceMark rm;
HandleMark hm;
......@@ -303,11 +307,21 @@ void ConcurrentMarkThread::yield() {
}
void ConcurrentMarkThread::stop() {
- // it is ok to take late safepoints here, if needed
- MutexLockerEx mu(Terminator_lock);
- _should_terminate = true;
- while (!_has_terminated) {
- Terminator_lock->wait();
+ {
+ MutexLockerEx ml(Terminator_lock);
+ _should_terminate = true;
+ }
+ {
+ MutexLockerEx ml(CGC_lock, Mutex::_no_safepoint_check_flag);
+ CGC_lock->notify_all();
+ }
+ {
+ MutexLockerEx ml(Terminator_lock);
+ while (!_has_terminated) {
+ Terminator_lock->wait();
+ }
}
}
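
The reworked stop() is a standard shutdown handshake for a thread that sleeps on its own lock: set the terminate flag, notify CGC_lock so sleepBeforeNextCycle() wakes up and re-checks the flag, then wait on Terminator_lock until the thread reports that it has terminated. A standalone approximation using std::mutex and std::condition_variable instead of HotSpot's Monitor (all names below are illustrative):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    std::mutex              terminator_lock, work_lock;
    std::condition_variable terminator_cv,   work_cv;
    std::atomic<bool>       should_terminate(false);
    bool                    has_terminated = false, work_pending = false;

    void worker() {
      for (;;) {
        {   // analogous to sleepBeforeNextCycle(): wait for work or for stop()
          std::unique_lock<std::mutex> l(work_lock);
          work_cv.wait(l, [] { return work_pending || should_terminate.load(); });
          if (should_terminate.load()) break;
          work_pending = false;
        }
        /* ... one marking cycle would run here ... */
      }
      std::lock_guard<std::mutex> l(terminator_lock);
      has_terminated = true;                              // let stop() return
      terminator_cv.notify_all();
    }

    void stop_worker() {
      should_terminate.store(true);                                        // 1. request termination
      { std::lock_guard<std::mutex> l(work_lock); work_cv.notify_all(); }  // 2. wake the sleeper
      std::unique_lock<std::mutex> l(terminator_lock);                     // 3. wait until it is gone
      terminator_cv.wait(l, [] { return has_terminated; });
    }

    int main() {
      std::thread t(worker);
      stop_worker();
      t.join();
      return 0;
    }
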
......@@ -327,11 +341,14 @@ void ConcurrentMarkThread::sleepBeforeNextCycle() {
assert(!in_progress(), "should have been cleared");
MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
- while (!started()) {
+ while (!started() && !_should_terminate) {
CGC_lock->wait(Mutex::_no_safepoint_check_flag);
}
- set_in_progress();
- clear_started();
+ if (started()) {
+ set_in_progress();
+ clear_started();
+ }
}
// Note: As is the case with CMS - this method, although exported
......
......@@ -435,6 +435,9 @@ HeapRegion* G1CollectedHeap::pop_dirty_cards_region()
void G1CollectedHeap::stop_conc_gc_threads() {
_cg1r->stop();
_cmThread->stop();
+ if (G1StringDedup::is_enabled()) {
+ G1StringDedup::stop();
+ }
}
#ifdef ASSERT
......@@ -2182,6 +2185,23 @@ jint G1CollectedHeap::initialize() {
return JNI_OK;
}
+ void G1CollectedHeap::stop() {
+ #if 0
+ // Stopping concurrent worker threads is currently disabled until
+ // some bugs in concurrent mark have been resolved. Without fixing
+ // those bugs first we risk hanging during VM exit when trying to
+ // stop these threads.
+ // Abort any ongoing concurrent root region scanning and stop all
+ // concurrent threads. We do this to make sure these threads do
+ // not continue to execute and access resources (e.g. gclog_or_tty)
+ // that are destroyed during shutdown.
+ _cm->root_regions()->abort();
+ _cm->root_regions()->wait_until_scan_finished();
+ stop_conc_gc_threads();
+ #endif
+ }
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
......
......@@ -1082,6 +1082,8 @@ public:
// specified by the policy object.
jint initialize();
+ virtual void stop();
// Return the (conservative) maximum heap alignment for any G1 heap
static size_t conservative_max_heap_alignment();
......
......@@ -95,7 +95,15 @@ void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
jbyte *const first = byte_for(mr.start());
jbyte *const last = byte_after(mr.last());
- memset(first, g1_young_gen, last - first);
+ // Below we may use an explicit loop instead of memset() because on
+ // certain platforms memset() can give concurrent readers phantom zeros.
+ if (UseMemSetInBOT) {
+ memset(first, g1_young_gen, last - first);
+ } else {
+ for (jbyte* i = first; i < last; i++) {
+ *i = g1_young_gen;
+ }
+ }
}
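
A hypothetical standalone version of the same fill logic (not the HotSpot card table code), just to make the property explicit: with the plain loop every card byte receives exactly one store of the final value, which is what the comment above relies on for concurrent readers. UseMemSetInBOT is the existing flag referenced in the hunk; the remaining names are invented:

    #include <cstddef>
    #include <cstdio>
    #include <cstring>

    typedef signed char jbyte;              // stand-in for HotSpot's jbyte

    // Fill [first, last) with 'value', either via memset() or via one plain
    // store per byte, selected the same way the hunk above uses UseMemSetInBOT.
    static void fill_cards(jbyte* first, jbyte* last, jbyte value, bool use_memset) {
      if (use_memset) {
        std::memset(first, value, (std::size_t)(last - first));
      } else {
        for (jbyte* p = first; p < last; p++) {
          *p = value;                       // exactly one store of the final value
        }
      }
    }

    int main() {
      jbyte cards[16] = {0};
      fill_cards(cards, cards + 16, (jbyte)1, false);
      std::printf("card[0] = %d\n", (int)cards[0]);
      return 0;
    }
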
#ifndef PRODUCT
......
......@@ -44,6 +44,11 @@ void G1StringDedup::initialize() {
}
}
+ void G1StringDedup::stop() {
+ assert(is_enabled(), "String deduplication not enabled");
+ G1StringDedupThread::stop();
+ }
bool G1StringDedup::is_candidate_from_mark(oop obj) {
if (java_lang_String::is_instance(obj)) {
bool from_young = G1CollectedHeap::heap()->heap_region_containing_raw(obj)->is_young();
......
......@@ -110,8 +110,12 @@ public:
return _enabled;
}
// Initialize string deduplication.
static void initialize();
+ // Stop the deduplication thread.
+ static void stop();
// Immediately deduplicates the given String object, bypassing
// the deduplication queue.
static void deduplicate(oop java_string);
......
......@@ -35,6 +35,7 @@ const size_t G1StringDedupQueue::_max_cache_size = 0; // Max cache size p
G1StringDedupQueue::G1StringDedupQueue() :
_cursor(0),
+ _cancel(false),
_empty(true),
_dropped(0) {
_nqueues = MAX2(ParallelGCThreads, (size_t)1);
......@@ -55,11 +56,17 @@ void G1StringDedupQueue::create() {
void G1StringDedupQueue::wait() {
MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
- while (_queue->_empty) {
+ while (_queue->_empty && !_queue->_cancel) {
ml.wait(Mutex::_no_safepoint_check_flag);
}
}
+ void G1StringDedupQueue::cancel_wait() {
+ MonitorLockerEx ml(StringDedupQueue_lock, Mutex::_no_safepoint_check_flag);
+ _queue->_cancel = true;
+ ml.notify();
+ }
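
cancel_wait() makes the queue's blocking wait() cancellable: the waiter also leaves its loop when _cancel is set, and the canceller flips the flag and notifies under the same monitor. A small standalone version of the idea with standard C++ primitives (names are illustrative, not the G1 classes):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    std::mutex              queue_lock;
    std::condition_variable queue_cv;
    bool queue_empty = true, cancelled = false;

    void wait_for_work() {
      std::unique_lock<std::mutex> l(queue_lock);
      // Mirrors: while (_queue->_empty && !_queue->_cancel) ml.wait(...);
      queue_cv.wait(l, [] { return !queue_empty || cancelled; });
      std::puts(cancelled ? "woken by cancel_wait()" : "woken by a push");
    }

    void cancel_wait() {
      std::lock_guard<std::mutex> l(queue_lock);
      cancelled = true;
      queue_cv.notify_all();
    }

    int main() {
      std::thread waiter(wait_for_work);
      cancel_wait();        // e.g. called from the dedup thread's stop() path
      waiter.join();
      return 0;
    }
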
void G1StringDedupQueue::push(uint worker_id, oop java_string) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
assert(worker_id < _queue->_nqueues, "Invalid queue");
......
......@@ -65,6 +65,7 @@ private:
G1StringDedupWorkerQueue* _queues;
size_t _nqueues;
size_t _cursor;
+ bool _cancel;
volatile bool _empty;
// Statistics counter, only used for logging.
......@@ -81,6 +82,9 @@ public:
// Blocks and waits for the queue to become non-empty.
static void wait();
+ // Wakes up any thread blocked waiting for the queue to become non-empty.
+ static void cancel_wait();
// Pushes a deduplication candidate onto a specific GC worker queue.
static void push(uint worker_id, oop java_string);
......
......@@ -73,6 +73,9 @@ void G1StringDedupThread::run() {
// Wait for the queue to become non-empty
G1StringDedupQueue::wait();
+ if (_should_terminate) {
+ break;
+ }
// Include this thread in safepoints
stsJoin();
......@@ -108,7 +111,23 @@ void G1StringDedupThread::run() {
stsLeave();
}
- ShouldNotReachHere();
+ terminate();
}
+ void G1StringDedupThread::stop() {
+ {
+ MonitorLockerEx ml(Terminator_lock);
+ _thread->_should_terminate = true;
+ }
+ G1StringDedupQueue::cancel_wait();
+ {
+ MonitorLockerEx ml(Terminator_lock);
+ while (!_thread->_has_terminated) {
+ ml.wait();
+ }
+ }
+ }
void G1StringDedupThread::print(outputStream* st, const G1StringDedupStat& last_stat, const G1StringDedupStat& total_stat) {
......
......@@ -47,6 +47,8 @@ private:
public:
static void create();
+ static void stop();
static G1StringDedupThread* thread();
virtual void run();
......
......@@ -208,6 +208,9 @@ class CollectedHeap : public CHeapObj<mtInternal> {
// This is the correct place to place such initialization methods.
virtual void post_initialize() = 0;
+ // Stop any ongoing concurrent work and prepare for exit.
+ virtual void stop() {}
MemRegion reserved_region() const { return _reserved; }
address base() const { return (address)reserved_region().start(); }
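
stop() is introduced as an empty virtual hook so that shutdown code can always call Universe::heap()->stop() (see the before_exit() hunk further down); only collectors that run concurrent threads, such as G1, need to override it. A reduced sketch of that shape (class and function bodies invented for illustration):

    #include <cstdio>

    class Heap {
    public:
      // Stop any ongoing concurrent work; the default is a no-op so collectors
      // without concurrent threads need not care.
      virtual void stop() {}
      virtual ~Heap() {}
    };

    class ConcurrentHeap : public Heap {
    public:
      virtual void stop() { std::puts("stopping concurrent GC threads"); }
    };

    // Stand-in for before_exit(): always safe to call, whatever the heap is.
    static void before_exit(Heap* heap) {
      heap->stop();
    }

    int main() {
      ConcurrentHeap g1_like;
      before_exit(&g1_like);
      return 0;
    }
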
......
......@@ -561,6 +561,7 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
_chunk = new (alloc_failmode, len) Chunk(len);
if (_chunk == NULL) {
+ _chunk = k; // restore the previous value of _chunk
return NULL;
}
if (k) k->set_next(_chunk); // Append new chunk to end of linked list
......
......@@ -307,9 +307,9 @@ JvmtiEnv::GetObjectSize(jobject object, jlong* size_ptr) {
!java_lang_Class::is_primitive(mirror)) {
Klass* k = java_lang_Class::as_Klass(mirror);
assert(k != NULL, "class for non-primitive mirror must exist");
- *size_ptr = k->size() * wordSize;
+ *size_ptr = (jlong)k->size() * wordSize;
} else {
- *size_ptr = mirror->size() * wordSize;
+ *size_ptr = (jlong)mirror->size() * wordSize;
}
return JVMTI_ERROR_NONE;
} /* end GetObjectSize */
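
The (jlong) cast has to happen before the multiplication: the object size in words is multiplied by wordSize, and for an object larger than 2^31 bytes, such as the 600M-element int[] in the new test below, a 32-bit multiply wraps before the result is widened into *size_ptr. A small, self-contained illustration of the difference (the sizes are approximate and chosen only for the demo):

    #include <cstdio>
    #include <stdint.h>

    int main() {
      typedef int64_t jlong;                    // JNI's 64-bit integer type
      jlong word_size     = 8;                  // bytes per HeapWord on a 64-bit VM
      jlong size_in_words = 300000002;          // roughly a 600,000,000-element int[]

      jlong bytes = size_in_words * word_size;  // 2,400,000,016: does not fit in 32 bits

      std::printf("widened multiply : %lld bytes\n", (long long)bytes);
      std::printf("low 32 bits only : %u (negative when read back as a signed int,\n"
                  "                   which is what the un-cast multiply reported)\n",
                  (unsigned)(bytes & 0xFFFFFFFFu));
      return 0;
    }
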
......
......@@ -497,6 +497,9 @@ void before_exit(JavaThread * thread) {
os::infinite_sleep();
}
+ // Stop any ongoing concurrent GC work
+ Universe::heap()->stop();
// Terminate watcher thread - must before disenrolling any periodic task
if (PeriodicTask::num_tasks() > 0)
WatcherThread::stop();
......
......@@ -378,21 +378,22 @@ const char* AbstractGangTask::name() const {
WorkGangBarrierSync::WorkGangBarrierSync()
: _monitor(Mutex::safepoint, "work gang barrier sync", true),
- _n_workers(0), _n_completed(0), _should_reset(false) {
+ _n_workers(0), _n_completed(0), _should_reset(false), _aborted(false) {
}
WorkGangBarrierSync::WorkGangBarrierSync(uint n_workers, const char* name)
: _monitor(Mutex::safepoint, name, true),
- _n_workers(n_workers), _n_completed(0), _should_reset(false) {
+ _n_workers(n_workers), _n_completed(0), _should_reset(false), _aborted(false) {
}
void WorkGangBarrierSync::set_n_workers(uint n_workers) {
- _n_workers = n_workers;
- _n_completed = 0;
+ _n_workers = n_workers;
+ _n_completed = 0;
_should_reset = false;
+ _aborted = false;
}
- void WorkGangBarrierSync::enter() {
+ bool WorkGangBarrierSync::enter() {
MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
if (should_reset()) {
// The should_reset() was set and we are the first worker to enter
......@@ -415,10 +416,17 @@ void WorkGangBarrierSync::enter() {
set_should_reset(true);
monitor()->notify_all();
} else {
- while (n_completed() != n_workers()) {
+ while (n_completed() != n_workers() && !aborted()) {
monitor()->wait(/* no_safepoint_check */ true);
}
}
+ return !aborted();
}
+ void WorkGangBarrierSync::abort() {
+ MutexLockerEx x(monitor(), Mutex::_no_safepoint_check_flag);
+ set_aborted();
+ monitor()->notify_all();
+ }
// SubTasksDone functions.
......
......@@ -359,18 +359,20 @@ class FlexibleWorkGang: public WorkGang {
class WorkGangBarrierSync : public StackObj {
protected:
Monitor _monitor;
- uint _n_workers;
- uint _n_completed;
+ uint _n_workers;
+ uint _n_completed;
bool _should_reset;
+ bool _aborted;
Monitor* monitor() { return &_monitor; }
uint n_workers() { return _n_workers; }
uint n_completed() { return _n_completed; }
bool should_reset() { return _should_reset; }
+ bool aborted() { return _aborted; }
void zero_completed() { _n_completed = 0; }
void inc_completed() { _n_completed++; }
+ void set_aborted() { _aborted = true; }
void set_should_reset(bool v) { _should_reset = v; }
public:
......@@ -383,8 +385,14 @@ public:
// Enter the barrier. A worker that enters the barrier will
// not be allowed to leave until all other threads have
- // also entered the barrier.
- void enter();
+ // also entered the barrier or the barrier is aborted.
+ // Returns false if the barrier was aborted.
+ bool enter();
+ // Aborts the barrier and wakes up any threads waiting for
+ // the barrier to complete. The barrier will remain in the
+ // aborted state until the next call to set_n_workers().
+ void abort();
};
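
The abortable barrier is what lets ConcurrentMark::abort() (in the hunk further up) unblock workers parked in enter() at the overflow sync points. A self-contained approximation with standard C++ synchronization, a sketch of the idea rather than the real WorkGangBarrierSync (which also carries the should_reset recycling logic):

    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    class BarrierSync {
      std::mutex              _m;
      std::condition_variable _cv;
      unsigned _n_workers, _n_completed;
      bool _aborted;
    public:
      explicit BarrierSync(unsigned n) : _n_workers(n), _n_completed(0), _aborted(false) {}

      // Returns false if the barrier was aborted while (or before) waiting.
      bool enter() {
        std::unique_lock<std::mutex> l(_m);
        _n_completed++;
        if (_n_completed == _n_workers) {
          _cv.notify_all();                      // last worker releases everyone
        } else {
          _cv.wait(l, [this] { return _n_completed == _n_workers || _aborted; });
        }
        return !_aborted;
      }

      // Wakes up all waiters; they will return false from enter().
      void abort() {
        std::lock_guard<std::mutex> l(_m);
        _aborted = true;
        _cv.notify_all();
      }
    };

    int main() {
      BarrierSync barrier(2);                    // expect 2 workers, start only 1
      std::thread w([&] { std::printf("enter() -> %d\n", (int)barrier.enter()); });
      barrier.abort();                           // the second worker never arrives
      w.join();
      return 0;
    }
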
// A class to manage claiming of subtasks within a group of tasks. The
......
......@@ -134,6 +134,8 @@ needs_compact3 = \
gc/arguments/TestDynMaxHeapFreeRatio.java \
runtime/InternalApi/ThreadCpuTimesDeadlock.java \
serviceability/threads/TestFalseDeadLock.java \
+ serviceability/jvmti/GetObjectSizeOverflow.java \
+ serviceability/jvmti/TestRedefineWithUnresolvedClass.java \
compiler/tiered/NonTieredLevelsTest.java \
compiler/tiered/TieredLevelsTest.java \
compiler/intrinsics/bmi/verifycode
......
/*
* Copyright (c) 2014 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.io.PrintWriter;
import com.oracle.java.testlibrary.*;
/*
* Test to verify GetObjectSize does not overflow on a 600M element int[]
*
* @test
* @bug 8027230
* @library /testlibrary
* @build GetObjectSizeOverflowAgent
* @run main ClassFileInstaller GetObjectSizeOverflowAgent
* @run main GetObjectSizeOverflow
*/
public class GetObjectSizeOverflow {
public static void main(String[] args) throws Exception {
if (!Platform.is64bit()) {
System.out.println("Test needs a 4GB heap and can only be run as a 64bit process, skipping.");
return;
}
PrintWriter pw = new PrintWriter("MANIFEST.MF");
pw.println("Premain-Class: GetObjectSizeOverflowAgent");
pw.close();
ProcessBuilder pb = new ProcessBuilder();
pb.command(new String[] { JDKToolFinder.getJDKTool("jar"), "cmf", "MANIFEST.MF", "agent.jar", "GetObjectSizeOverflowAgent.class"});
pb.start().waitFor();
ProcessBuilder pt = ProcessTools.createJavaProcessBuilder(true, "-Xmx4000m", "-javaagent:agent.jar", "GetObjectSizeOverflowAgent");
OutputAnalyzer output = new OutputAnalyzer(pt.start());
if (output.getStdout().contains("Could not reserve enough space") || output.getStderr().contains("java.lang.OutOfMemoryError")) {
System.out.println("stdout: " + output.getStdout());
System.out.println("stderr: " + output.getStderr());
System.out.println("Test could not reserve or allocate enough space, skipping");
return;
}
output.stdoutShouldContain("GetObjectSizeOverflow passed");
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
import java.lang.instrument.*;
public class GetObjectSizeOverflowAgent {
static Instrumentation instrumentation;
public static void premain(String agentArgs, Instrumentation instrumentation) {
GetObjectSizeOverflowAgent.instrumentation = instrumentation;
}
public static void main(String[] args) throws Exception {
int[] a = new int[600_000_000];
long size = instrumentation.getObjectSize(a);
if (size < 2_400_000_000L) {
throw new RuntimeException("Invalid size of array, expected >= 2400000000, got " + size);
}
System.out.println("GetObjectSizeOverflow passed");
}
}