Commit 45e70d71 authored by mlarsson

8072621: Clean up around VM_GC_Operations

Reviewed-by: brutisso, jmasa
Parent 6d938636
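Note on the change: most of this cleanup only swaps spelled-out unsigned int declarations for HotSpot's shorter uint typedef, while a handful of counters (the gclocker retry/stall counts and the GCLockerRetryAllocationCount flag) genuinely change from signed to unsigned. A minimal sketch of the distinction, assuming the typedef from HotSpot's globalDefinitions.hpp (the exact header location and the do_pause name are illustrative assumptions, not part of the patch):

// HotSpot aliases the primitive unsigned types; this typedef is
// assumed to live in share/vm/utilities/globalDefinitions.hpp.
typedef unsigned int uint;

// Spelling-only change: both lines declare the same function, since
// uint and unsigned int are one type. No behavioral difference.
void do_pause(unsigned int gc_count_before);  // before the patch
void do_pause(uint gc_count_before);          // after the patch

// Genuine signedness change: counters such as gclocker_retry_count
// move from int to uint, so they can no longer go negative and they
// compare cleanly against unsigned flags.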
/*
- * Copyright (c) 2005, 2011, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -130,8 +130,8 @@ class VM_CMS_Final_Remark: public VM_CMS_Operation {
class VM_GenCollectFullConcurrent: public VM_GC_Operation {
bool _disabled_icms;
public:
- VM_GenCollectFullConcurrent(unsigned int gc_count_before,
- unsigned int full_gc_count_before,
+ VM_GenCollectFullConcurrent(uint gc_count_before,
+ uint full_gc_count_before,
GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_disabled_icms(false)
......
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -353,7 +353,7 @@ void YoungList::print() {
HeapRegion* lists[] = {_head, _survivor_head};
const char* names[] = {"YOUNG", "SURVIVOR"};
- for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
+ for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
HeapRegion *curr = lists[list];
if (curr == NULL)
@@ -827,8 +827,8 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "we do not allow humongous TLABs");
- unsigned int dummy_gc_count_before;
- int dummy_gclocker_retry_count = 0;
+ uint dummy_gc_count_before;
+ uint dummy_gclocker_retry_count = 0;
return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
}
@@ -838,8 +838,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
assert_heap_not_locked_and_not_at_safepoint();
// Loop until the allocation is satisfied, or unsatisfied after GC.
- for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
- unsigned int gc_count_before;
+ for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
+ uint gc_count_before;
HeapWord* result = NULL;
if (!isHumongous(word_size)) {
@@ -891,8 +891,8 @@ G1CollectedHeap::mem_allocate(size_t word_size,
HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
AllocationContext_t context,
- unsigned int *gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret) {
// Make sure you read the note in attempt_allocation_humongous().
assert_heap_not_locked_and_not_at_safepoint();
@@ -909,7 +909,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
HeapWord* result = NULL;
for (int try_count = 1; /* we'll return */; try_count += 1) {
bool should_try_gc;
- unsigned int gc_count_before;
+ uint gc_count_before;
{
MutexLockerEx x(Heap_lock);
@@ -953,7 +953,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
if (should_try_gc) {
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded,
- GCCause::_g1_inc_collection_pause);
+ GCCause::_g1_inc_collection_pause);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
return result;
@@ -1007,8 +1007,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
}
HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
- unsigned int * gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret) {
// The structure of this method has a lot of similarities to
// attempt_allocation_slow(). The reason these two were not merged
// into a single one is that such a method would require several "if
@@ -1041,7 +1041,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
HeapWord* result = NULL;
for (int try_count = 1; /* we'll return */; try_count += 1) {
bool should_try_gc;
- unsigned int gc_count_before;
+ uint gc_count_before;
{
MutexLockerEx x(Heap_lock);
@@ -1079,7 +1079,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
bool succeeded;
result = do_collection_pause(word_size, gc_count_before, &succeeded,
- GCCause::_g1_humongous_allocation);
+ GCCause::_g1_humongous_allocation);
if (result != NULL) {
assert(succeeded, "only way to get back a non-NULL result");
return result;
@@ -1887,7 +1887,7 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
assert(n_rem_sets > 0, "Invariant.");
_worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
- _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
+ _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
_evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
for (int i = 0; i < n_queues; i++) {
@@ -2473,9 +2473,9 @@ G1YCType G1CollectedHeap::yc_type() {
void G1CollectedHeap::collect(GCCause::Cause cause) {
assert_heap_not_locked();
- unsigned int gc_count_before;
- unsigned int old_marking_count_before;
- unsigned int full_gc_count_before;
+ uint gc_count_before;
+ uint old_marking_count_before;
+ uint full_gc_count_before;
bool retry_gc;
do {
@@ -3613,7 +3613,7 @@ void G1CollectedHeap::gc_epilogue(bool full) {
}
HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
- unsigned int gc_count_before,
+ uint gc_count_before,
bool* succeeded,
GCCause::Cause gc_cause) {
assert_heap_not_locked_and_not_at_safepoint();
......
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -343,11 +343,11 @@ private:
// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have started.
- volatile unsigned int _old_marking_cycles_started;
+ volatile uint _old_marking_cycles_started;
// Keeps track of how many "old marking cycles" (i.e., Full GCs or
// concurrent cycles) we have completed.
- volatile unsigned int _old_marking_cycles_completed;
+ volatile uint _old_marking_cycles_completed;
bool _concurrent_cycle_started;
bool _heap_summary_sent;
@@ -515,22 +515,22 @@ protected:
// the mutator alloc region without taking the Heap_lock. This
// should only be used for non-humongous allocations.
inline HeapWord* attempt_allocation(size_t word_size,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret);
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret);
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
AllocationContext_t context,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret);
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.
HeapWord* attempt_allocation_humongous(size_t word_size,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret);
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret);
// Allocation attempt that should be called during safepoints (e.g.,
// at the end of a successful GC). expect_null_mutator_alloc_region
@@ -701,7 +701,7 @@ public:
// +ExplicitGCInvokesConcurrent).
void increment_old_marking_cycles_completed(bool concurrent);
- unsigned int old_marking_cycles_completed() {
+ uint old_marking_cycles_completed() {
return _old_marking_cycles_completed;
}
@@ -760,7 +760,7 @@ protected:
// methods that call do_collection_pause() release the Heap_lock
// before the call, so it's easy to read gc_count_before just before.
HeapWord* do_collection_pause(size_t word_size,
- unsigned int gc_count_before,
+ uint gc_count_before,
bool* succeeded,
GCCause::Cause gc_cause);
@@ -983,7 +983,7 @@ protected:
// The heap region entry for a given worker is valid iff
// the associated time stamp value matches the current value
// of G1CollectedHeap::_gc_time_stamp.
- unsigned int* _worker_cset_start_region_time_stamp;
+ uint* _worker_cset_start_region_time_stamp;
volatile bool _free_regions_coming;
......
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -131,8 +131,8 @@ inline bool G1CollectedHeap::obj_in_cs(oop obj) {
}
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
- unsigned int* gc_count_before_ret,
- int* gclocker_retry_count_ret) {
+ uint* gc_count_before_ret,
+ uint* gclocker_retry_count_ret) {
assert_heap_not_locked_and_not_at_safepoint();
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
......
/*
- * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -34,9 +34,8 @@
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "runtime/interfaceSupport.hpp"
- VM_G1CollectForAllocation::VM_G1CollectForAllocation(
- unsigned int gc_count_before,
- size_t word_size)
+ VM_G1CollectForAllocation::VM_G1CollectForAllocation(uint gc_count_before,
+ size_t word_size)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size,
GCCause::_allocation_failure) {
guarantee(word_size > 0, "an allocation should always be requested");
@@ -57,12 +56,11 @@ void VM_G1CollectFull::doit() {
g1h->do_full_collection(false /* clear_all_soft_refs */);
}
- VM_G1IncCollectionPause::VM_G1IncCollectionPause(
- unsigned int gc_count_before,
- size_t word_size,
- bool should_initiate_conc_mark,
- double target_pause_time_ms,
- GCCause::Cause gc_cause)
+ VM_G1IncCollectionPause::VM_G1IncCollectionPause(uint gc_count_before,
+ size_t word_size,
+ bool should_initiate_conc_mark,
+ double target_pause_time_ms,
+ GCCause::Cause gc_cause)
: VM_G1OperationWithAllocRequest(gc_count_before, word_size, gc_cause),
_should_initiate_conc_mark(should_initiate_conc_mark),
_target_pause_time_ms(target_pause_time_ms),
......
/*
- * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@ protected:
AllocationContext_t _allocation_context;
public:
- VM_G1OperationWithAllocRequest(unsigned int gc_count_before,
- size_t word_size,
+ VM_G1OperationWithAllocRequest(uint gc_count_before,
+ size_t word_size,
GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause),
_word_size(word_size), _result(NULL), _pause_succeeded(false) { }
@@ -57,8 +57,8 @@ public:
class VM_G1CollectFull: public VM_GC_Operation {
public:
- VM_G1CollectFull(unsigned int gc_count_before,
- unsigned int full_gc_count_before,
+ VM_G1CollectFull(uint gc_count_before,
+ uint full_gc_count_before,
GCCause::Cause cause)
: VM_GC_Operation(gc_count_before, cause, full_gc_count_before, true) { }
virtual VMOp_Type type() const { return VMOp_G1CollectFull; }
@@ -70,7 +70,7 @@ public:
class VM_G1CollectForAllocation: public VM_G1OperationWithAllocRequest {
public:
- VM_G1CollectForAllocation(unsigned int gc_count_before,
+ VM_G1CollectForAllocation(uint gc_count_before,
size_t word_size);
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual void doit();
@@ -84,9 +84,9 @@ private:
bool _should_initiate_conc_mark;
bool _should_retry_gc;
double _target_pause_time_ms;
- unsigned int _old_marking_cycles_completed_before;
+ uint _old_marking_cycles_completed_before;
public:
- VM_G1IncCollectionPause(unsigned int gc_count_before,
+ VM_G1IncCollectionPause(uint gc_count_before,
size_t word_size,
bool should_initiate_conc_mark,
double target_pause_time_ms,
......
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -261,7 +261,7 @@ HeapWord* ParallelScavengeHeap::mem_allocate(
uint loop_count = 0;
uint gc_count = 0;
- int gclocker_stalled_count = 0;
+ uint gclocker_stalled_count = 0;
while (result == NULL) {
// We don't want to have multiple collections for a single filled generation.
@@ -521,8 +521,8 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
assert(!Heap_lock->owned_by_self(),
"this thread should not own the Heap_lock");
- unsigned int gc_count = 0;
- unsigned int full_gc_count = 0;
+ uint gc_count = 0;
+ uint full_gc_count = 0;
{
MutexLocker ml(Heap_lock);
// This value is guarded by the Heap_lock
......
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -33,7 +33,7 @@
// The following methods are used by the parallel scavenge collector
VM_ParallelGCFailedAllocation::VM_ParallelGCFailedAllocation(size_t size,
- unsigned int gc_count) :
+ uint gc_count) :
VM_GC_Operation(gc_count, GCCause::_allocation_failure),
_size(size),
_result(NULL)
@@ -55,8 +55,8 @@ void VM_ParallelGCFailedAllocation::doit() {
}
// Only used for System.gc() calls
- VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(unsigned int gc_count,
- unsigned int full_gc_count,
+ VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
+ uint full_gc_count,
GCCause::Cause gc_cause) :
VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
{
......
/*
- * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,7 +35,7 @@ class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
HeapWord* _result;
public:
- VM_ParallelGCFailedAllocation(size_t size, unsigned int gc_count);
+ VM_ParallelGCFailedAllocation(size_t size, uint gc_count);
virtual VMOp_Type type() const {
return VMOp_ParallelGCFailedAllocation;
@@ -47,8 +47,7 @@ class VM_ParallelGCFailedAllocation: public VM_GC_Operation {
class VM_ParallelGCSystemGC: public VM_GC_Operation {
public:
- VM_ParallelGCSystemGC(unsigned int gc_count, unsigned int full_gc_count,
- GCCause::Cause gc_cause);
+ VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
virtual void doit();
};
......
/*
- * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -66,13 +66,13 @@
class VM_GC_Operation: public VM_Operation {
protected:
- BasicLock _pending_list_basic_lock; // for refs pending list notification (PLL)
- unsigned int _gc_count_before; // gc count before acquiring PLL
- unsigned int _full_gc_count_before; // full gc count before acquiring PLL
- bool _full; // whether a "full" collection
- bool _prologue_succeeded; // whether doit_prologue succeeded
+ BasicLock _pending_list_basic_lock; // for refs pending list notification (PLL)
+ uint _gc_count_before; // gc count before acquiring PLL
+ uint _full_gc_count_before; // full gc count before acquiring PLL
+ bool _full; // whether a "full" collection
+ bool _prologue_succeeded; // whether doit_prologue succeeded
GCCause::Cause _gc_cause; // the putative cause for this gc op
- bool _gc_locked; // will be set if gc was locked
+ bool _gc_locked; // will be set if gc was locked
virtual bool skip_operation() const;
@@ -81,9 +81,9 @@ class VM_GC_Operation: public VM_Operation {
void release_and_notify_pending_list_lock();
public:
- VM_GC_Operation(unsigned int gc_count_before,
+ VM_GC_Operation(uint gc_count_before,
GCCause::Cause _cause,
- unsigned int full_gc_count_before = 0,
+ uint full_gc_count_before = 0,
bool full = false) {
_full = full;
_prologue_succeeded = false;
@@ -169,7 +169,7 @@ class VM_GenCollectForAllocation: public VM_GC_Operation {
public:
VM_GenCollectForAllocation(size_t size,
bool tlab,
- unsigned int gc_count_before)
+ uint gc_count_before)
: VM_GC_Operation(gc_count_before, GCCause::_allocation_failure),
_size(size),
_tlab(tlab) {
@@ -181,17 +181,16 @@ class VM_GenCollectForAllocation: public VM_GC_Operation {
HeapWord* result() const { return _res; }
};
// VM operation to invoke a collection of the heap as a
// GenCollectedHeap heap.
class VM_GenCollectFull: public VM_GC_Operation {
private:
int _max_level;
public:
- VM_GenCollectFull(unsigned int gc_count_before,
- unsigned int full_gc_count_before,
+ VM_GenCollectFull(uint gc_count_before,
+ uint full_gc_count_before,
GCCause::Cause gc_cause,
- int max_level)
+ int max_level)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
_max_level(max_level) { }
~VM_GenCollectFull() {}
@@ -208,9 +207,9 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
public:
VM_CollectForMetadataAllocation(ClassLoaderData* loader_data,
size_t size, Metaspace::MetadataType mdtype,
- unsigned int gc_count_before,
- unsigned int full_gc_count_before,
- GCCause::Cause gc_cause)
+ uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause)
: VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
_loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
}
......
/*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -656,7 +656,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
// Loop until the allocation is satisfied,
// or unsatisfied after GC.
- for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
+ for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
HandleMark hm; // discard any handles allocated in each iteration
// First allocation attempt is lock-free.
@@ -670,7 +670,7 @@ HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
return result;
}
}
- unsigned int gc_count_before; // read inside the Heap_lock locked region
+ uint gc_count_before; // read inside the Heap_lock locked region
{
MutexLocker ml(Heap_lock);
if (PrintGC && Verbose) {
......
@@ -1494,7 +1494,7 @@ class CommandLineFlags {
"How much the GC can expand the eden by while the GC locker " \
"is active (as a percentage)") \
\
- diagnostic(intx, GCLockerRetryAllocationCount, 2, \
+ diagnostic(uintx, GCLockerRetryAllocationCount, 2, \
"Number of times to retry allocations when " \
"blocked by the GC locker") \
\
......
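Note on the last hunk: GCLockerRetryAllocationCount becomes uintx so that it matches the gclocker retry counters, which this patch turns into uint. A minimal sketch of the retry-loop shape those counters guard, with try_allocate() as a hypothetical stand-in for the allocation path (the real logic in attempt_allocation_slow() is more involved):

// Sketch only: with the counter (uint) and the flag (uintx) both
// unsigned, this comparison no longer mixes signed and unsigned.
uint gclocker_retry_count = 0;
HeapWord* result = NULL;
while (result == NULL) {
  result = try_allocate();  // hypothetical helper, not a real HotSpot call
  if (result == NULL) {
    if (gclocker_retry_count > GCLockerRetryAllocationCount) {
      return NULL;  // retried often enough; give up
    }
    gclocker_retry_count += 1;
  }
}
return result;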