Commit 1a41ad41 authored by tonyp

Merge

#
# Copyright (c) 2000, 2008, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -84,6 +84,7 @@ sun.jvm.hotspot.debugger.windbg.ia64 \
sun.jvm.hotspot.debugger.windbg.x86 \
sun.jvm.hotspot.debugger.x86 \
sun.jvm.hotspot.gc_implementation \
sun.jvm.hotspot.gc_implementation.g1 \
sun.jvm.hotspot.gc_implementation.parallelScavenge \
sun.jvm.hotspot.gc_implementation.shared \
sun.jvm.hotspot.gc_interface \
......@@ -176,6 +177,9 @@ sun/jvm/hotspot/debugger/windbg/*.java \
sun/jvm/hotspot/debugger/windbg/ia64/*.java \
sun/jvm/hotspot/debugger/windbg/x86/*.java \
sun/jvm/hotspot/debugger/x86/*.java \
sun/jvm/hotspot/gc_implementation/g1/*.java \
sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
sun/jvm/hotspot/gc_implementation/shared/*.java \
sun/jvm/hotspot/interpreter/*.java \
sun/jvm/hotspot/jdi/*.java \
sun/jvm/hotspot/livejvm/*.java \
......
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.gc_interface.CollectedHeapName;
import sun.jvm.hotspot.memory.MemRegion;
import sun.jvm.hotspot.memory.SharedHeap;
import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for G1CollectedHeap.
public class G1CollectedHeap extends SharedHeap {
// HeapRegionSeq _hrs;
static private long hrsFieldOffset;
// MemRegion _g1_committed;
static private long g1CommittedFieldOffset;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1MonitoringSupport* _g1mm
static private AddressField g1mmField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1CollectedHeap");
hrsFieldOffset = type.getField("_hrs").getOffset();
g1CommittedFieldOffset = type.getField("_g1_committed").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
g1mmField = type.getAddressField("_g1mm");
}
public long capacity() {
Address g1CommittedAddr = addr.addOffsetTo(g1CommittedFieldOffset);
MemRegion g1_committed = new MemRegion(g1CommittedAddr);
return g1_committed.byteSize();
}
public long used() {
return summaryBytesUsedField.getValue(addr);
}
public long n_regions() {
return hrs().length();
}
private HeapRegionSeq hrs() {
Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
hrsAddr);
}
public G1MonitoringSupport g1mm() {
Address g1mmAddr = g1mmField.getValue(addr);
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
}
private Iterator<HeapRegion> heapRegionIterator() {
return hrs().heapRegionIterator();
}
public void heapRegionIterate(SpaceClosure scl) {
Iterator<HeapRegion> iter = heapRegionIterator();
while (iter.hasNext()) {
HeapRegion hr = iter.next();
scl.doSpace(hr);
}
}
public CollectedHeapName kind() {
return CollectedHeapName.G1_COLLECTED_HEAP;
}
public G1CollectedHeap(Address addr) {
super(addr);
}
}
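For context, a minimal usage sketch (not part of this change) of the new mirror. It assumes the SA has already attached, that heap came from VM.getVM().getUniverse().heap(), and that it was verified to be a G1CollectedHeap; Space is sun.jvm.hotspot.memory.Space.
// Hypothetical sketch: walk every heap region and sum the used bytes.
G1CollectedHeap g1 = (G1CollectedHeap) heap;
final long[] used = { 0 };
g1.heapRegionIterate(new SpaceClosure() {
    public void doSpace(Space s) {   // each HeapRegion is passed as a Space
        used[0] += s.used();
    }
});
System.out.println(g1.n_regions() + " regions, " + used[0] + " bytes used");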
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for G1MonitoringSupport.
public class G1MonitoringSupport extends VMObject {
// size_t _eden_committed;
static private CIntegerField edenCommittedField;
// size_t _eden_used;
static private CIntegerField edenUsedField;
// size_t _survivor_committed;
static private CIntegerField survivorCommittedField;
// size_t _survivor_used;
static private CIntegerField survivorUsedField;
// size_t _old_committed;
static private CIntegerField oldCommittedField;
// size_t _old_used;
static private CIntegerField oldUsedField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1MonitoringSupport");
edenCommittedField = type.getCIntegerField("_eden_committed");
edenUsedField = type.getCIntegerField("_eden_used");
survivorCommittedField = type.getCIntegerField("_survivor_committed");
survivorUsedField = type.getCIntegerField("_survivor_used");
oldCommittedField = type.getCIntegerField("_old_committed");
oldUsedField = type.getCIntegerField("_old_used");
}
public long edenCommitted() {
return edenCommittedField.getValue(addr);
}
public long edenUsed() {
return edenUsedField.getValue(addr);
}
public long survivorCommitted() {
return survivorCommittedField.getValue(addr);
}
public long survivorUsed() {
return survivorUsedField.getValue(addr);
}
public long oldCommitted() {
return oldCommittedField.getValue(addr);
}
public long oldUsed() {
return oldUsedField.getValue(addr);
}
public G1MonitoringSupport(Address addr) {
super(addr);
}
}
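As a quick sketch (hypothetical; g1h is assumed to be the G1CollectedHeap mirror above), the monitoring values are plain field reads from the target VM, so they reflect the sizes last recalculated by the VM rather than a live traversal:
// Hypothetical sketch: dump the generation sizes, in bytes.
G1MonitoringSupport g1mm = g1h.g1mm();
System.out.println("eden:     " + g1mm.edenUsed() + " / " + g1mm.edenCommitted());
System.out.println("survivor: " + g1mm.survivorUsed() + " / " + g1mm.survivorCommitted());
System.out.println("old:      " + g1mm.oldUsed() + " / " + g1mm.oldCommitted());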
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.memory.ContiguousSpace;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegion. Currently we don't actually include
// any of its fields but only iterate over it (which we get "for free"
// as HeapRegion ultimately inherits from ContiguousSpace).
public class HeapRegion extends ContiguousSpace {
// static size_t GrainBytes;
static private CIntegerField grainBytesField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegion");
grainBytesField = type.getCIntegerField("GrainBytes");
}
static public long grainBytes() {
return grainBytesField.getValue();
}
public HeapRegion(Address addr) {
super(addr);
}
}
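Since the grain is a single VM-wide value, a small hypothetical cross-check against the G1CollectedHeap mirror (g1 as in the earlier sketch) is possible:
// Hypothetical sketch: all committed G1 regions share one size, so
// n_regions() * grainBytes() should track capacity() (modulo any
// resizing in flight when the values were sampled).
long grain = HeapRegion.grainBytes();
System.out.println("GrainBytes             = " + grain);
System.out.println("n_regions * GrainBytes = " + (g1.n_regions() * grain));
System.out.println("capacity()             = " + g1.capacity());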
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Iterator;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSeq. It's essentially an index -> HeapRegion map.
public class HeapRegionSeq extends VMObject {
// HeapRegion** _regions;
static private AddressField regionsField;
// size_t _length;
static private CIntegerField lengthField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSeq");
regionsField = type.getAddressField("_regions");
lengthField = type.getCIntegerField("_length");
}
private HeapRegion at(long index) {
Address arrayAddr = regionsField.getValue(addr);
// Offset of &_regions[index]
long offset = index * VM.getVM().getAddressSize();
Address regionAddr = arrayAddr.getAddressAt(offset);
return (HeapRegion) VMObjectFactory.newObject(HeapRegion.class,
regionAddr);
}
public long length() {
return lengthField.getValue(addr);
}
private class HeapRegionIterator implements Iterator<HeapRegion> {
private long index;
private long length;
@Override
public boolean hasNext() { return index < length; }
@Override
public HeapRegion next() { return at(index++); }
@Override
public void remove() { /* not supported */ }
HeapRegionIterator(Address addr) {
index = 0;
length = length();
}
}
public Iterator<HeapRegion> heapRegionIterator() {
return new HeapRegionIterator(addr);
}
public HeapRegionSeq(Address addr) {
super(addr);
}
}
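A short hypothetical walk over the sequence through its public surface (seq standing for a HeapRegionSeq mirror; within the SA it is normally reached via G1CollectedHeap.heapRegionIterate() instead):
// Hypothetical sketch: visit regions in index order and print each
// region's address range (bottom()/end() come from Space).
Iterator<HeapRegion> it = seq.heapRegionIterator();
while (it.hasNext()) {
    HeapRegion hr = it.next();
    System.out.println("region [" + hr.bottom() + ", " + hr.end() + ")");
}
System.out.println(seq.length() + " regions total");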
/*
* Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -34,6 +34,7 @@ public class CollectedHeapName {
public static final CollectedHeapName ABSTRACT = new CollectedHeapName("abstract");
public static final CollectedHeapName SHARED_HEAP = new CollectedHeapName("SharedHeap");
public static final CollectedHeapName GEN_COLLECTED_HEAP = new CollectedHeapName("GenCollectedHeap");
public static final CollectedHeapName G1_COLLECTED_HEAP = new CollectedHeapName("G1CollectedHeap");
public static final CollectedHeapName PARALLEL_SCAVENGE_HEAP = new CollectedHeapName("ParallelScavengeHeap");
public String toString() {
......
/*
* Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -28,6 +28,7 @@ import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.gc_interface.*;
import sun.jvm.hotspot.gc_implementation.g1.G1CollectedHeap;
import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.types.*;
......@@ -72,6 +73,7 @@ public class Universe {
heapConstructor = new VirtualConstructor(db);
heapConstructor.addMapping("GenCollectedHeap", GenCollectedHeap.class);
heapConstructor.addMapping("ParallelScavengeHeap", ParallelScavengeHeap.class);
heapConstructor.addMapping("G1CollectedHeap", G1CollectedHeap.class);
mainThreadGroupField = type.getOopField("_main_thread_group");
systemThreadGroupField = type.getOopField("_system_thread_group");
......
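For context: VirtualConstructor keys off the dynamic C++ type name of the object at a given address and instantiates the wrapper class registered for it, so with the mapping above a -XX:+UseG1GC target resolves to the new mirror automatically. A hedged sketch of that dispatch (universeHeapAddr stands for the address held in Universe's heap field; the field name is assumed):
// Hypothetical sketch of the dispatch performed by Universe.
CollectedHeap heap =
    (CollectedHeap) heapConstructor.instantiateWrapperFor(universeHeapAddr);
if (heap instanceof G1CollectedHeap) {
    System.out.println("heap kind: " + heap.kind());  // G1CollectedHeap
}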
......@@ -33,6 +33,7 @@ import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.gc_interface.*;
import sun.jvm.hotspot.gc_implementation.g1.*;
import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.runtime.*;
......@@ -514,9 +515,16 @@ public class ObjectHeap {
private void addPermGenLiveRegions(List output, CollectedHeap heap) {
LiveRegionsCollector lrc = new LiveRegionsCollector(output);
if (heap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) heap;
Generation gen = genHeap.permGen();
if (heap instanceof SharedHeap) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(heap instanceof GenCollectedHeap ||
heap instanceof G1CollectedHeap,
"Expecting GenCollectedHeap or G1CollectedHeap, " +
"but got " + heap.getClass().getName());
}
// Handles both GenCollectedHeap and G1CollectedHeap
SharedHeap sharedHeap = (SharedHeap) heap;
Generation gen = sharedHeap.permGen();
gen.spaceIterate(lrc, true);
} else if (heap instanceof ParallelScavengeHeap) {
ParallelScavengeHeap psh = (ParallelScavengeHeap) heap;
......@@ -524,8 +532,9 @@ public class ObjectHeap {
addLiveRegions(permGen.objectSpace().getLiveRegions(), output);
} else {
if (Assert.ASSERTS_ENABLED) {
Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
heap.getClass().getName());
Assert.that(false,
"Expecting SharedHeap or ParallelScavengeHeap, " +
"but got " + heap.getClass().getName());
}
}
}
......@@ -588,10 +597,14 @@ public class ObjectHeap {
addLiveRegions(youngGen.fromSpace().getLiveRegions(), liveRegions);
PSOldGen oldGen = psh.oldGen();
addLiveRegions(oldGen.objectSpace().getLiveRegions(), liveRegions);
} else if (heap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) heap;
g1h.heapRegionIterate(lrc);
} else {
if (Assert.ASSERTS_ENABLED) {
Assert.that(false, "Expecting GenCollectedHeap or ParallelScavengeHeap, but got " +
heap.getClass().getName());
Assert.that(false, "Expecting GenCollectedHeap, G1CollectedHeap, " +
"or ParallelScavengeHeap, but got " +
heap.getClass().getName());
}
}
......
/*
* Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -26,11 +26,11 @@ package sun.jvm.hotspot.tools;
import java.util.*;
import sun.jvm.hotspot.gc_interface.*;
import sun.jvm.hotspot.gc_implementation.g1.*;
import sun.jvm.hotspot.gc_implementation.parallelScavenge.*;
import sun.jvm.hotspot.gc_implementation.shared.*;
import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.tools.*;
public class HeapSummary extends Tool {
......@@ -70,32 +70,45 @@ public class HeapSummary extends Tool {
System.out.println();
System.out.println("Heap Usage:");
if (heap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) heap;
for (int n = 0; n < genHeap.nGens(); n++) {
Generation gen = genHeap.getGen(n);
if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);
ContiguousSpace eden = ((DefNewGeneration)gen).eden();
System.out.println("Eden Space:");
printSpace(eden);
ContiguousSpace from = ((DefNewGeneration)gen).from();
System.out.println("From Space:");
printSpace(from);
ContiguousSpace to = ((DefNewGeneration)gen).to();
System.out.println("To Space:");
printSpace(to);
} else {
System.out.println(gen.name() + ":");
printGen(gen);
if (heap instanceof SharedHeap) {
SharedHeap sharedHeap = (SharedHeap) heap;
if (sharedHeap instanceof GenCollectedHeap) {
GenCollectedHeap genHeap = (GenCollectedHeap) sharedHeap;
for (int n = 0; n < genHeap.nGens(); n++) {
Generation gen = genHeap.getGen(n);
if (gen instanceof sun.jvm.hotspot.memory.DefNewGeneration) {
System.out.println("New Generation (Eden + 1 Survivor Space):");
printGen(gen);
ContiguousSpace eden = ((DefNewGeneration)gen).eden();
System.out.println("Eden Space:");
printSpace(eden);
ContiguousSpace from = ((DefNewGeneration)gen).from();
System.out.println("From Space:");
printSpace(from);
ContiguousSpace to = ((DefNewGeneration)gen).to();
System.out.println("To Space:");
printSpace(to);
} else {
System.out.println(gen.name() + ":");
printGen(gen);
}
}
} else if (sharedHeap instanceof G1CollectedHeap) {
G1CollectedHeap g1h = (G1CollectedHeap) sharedHeap;
G1MonitoringSupport g1mm = g1h.g1mm();
System.out.println("G1 Young Generation");
printG1Space("Eden Space:", g1mm.edenUsed(), g1mm.edenCommitted());
printG1Space("From Space:", g1mm.survivorUsed(), g1mm.survivorCommitted());
printG1Space("To Space:", 0, 0);
printG1Space("G1 Old Generation", g1mm.oldUsed(), g1mm.oldCommitted());
} else {
throw new RuntimeException("unknown SharedHeap type : " + heap.getClass());
}
// Perm generation
Generation permGen = genHeap.permGen();
// Perm generation shared by the above
Generation permGen = sharedHeap.permGen();
System.out.println("Perm Generation:");
printGen(permGen);
} else if (heap instanceof ParallelScavengeHeap) {
......@@ -119,7 +132,7 @@ public class HeapSummary extends Tool {
printValMB("free = ", permFree);
System.out.println(alignment + (double)permGen.used() * 100.0 / permGen.capacity() + "% used");
} else {
throw new RuntimeException("unknown heap type : " + heap.getClass());
throw new RuntimeException("unknown CollectedHeap type : " + heap.getClass());
}
}
......@@ -151,6 +164,14 @@ public class HeapSummary extends Tool {
return;
}
l = getFlagValue("UseG1GC", flagMap);
if (l == 1L) {
System.out.print("Garbage-First (G1) GC ");
l = getFlagValue("ParallelGCThreads", flagMap);
System.out.println("with " + l + " thread(s)");
return;
}
System.out.println("Mark Sweep Compact GC");
}
......@@ -191,6 +212,16 @@ public class HeapSummary extends Tool {
System.out.println(alignment + (double)space.used() * 100.0 / space.capacity() + "% used");
}
private void printG1Space(String spaceName, long used, long capacity) {
long free = capacity - used;
System.out.println(spaceName);
printValMB("capacity = ", capacity);
printValMB("used = ", used);
printValMB("free = ", free);
double occPerc = (capacity > 0) ? (double) used * 100.0 / capacity : 0.0;
System.out.println(alignment + occPerc + "% used");
}
private static final double FACTOR = 1024*1024;
private void printValMB(String title, long value) {
if (value < 0) {
......
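HeapSummary is a Tool subclass, so it is launched like the other SA tools; a minimal sketch of the usual entry point (hypothetical; argument handling as in other Tools, e.g. a target pid):
// Hypothetical launcher sketch for the tool above.
public static void main(String[] args) {
    HeapSummary hs = new HeapSummary();
    hs.start(args);   // e.g. args = { "<pid>" }
    hs.stop();
}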
#
# Copyright (c) 2003, 2008, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -82,6 +82,7 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/windbg/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/g1/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/parallelScavenge/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_implementation/shared/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/gc_interface/*.java \
......
......@@ -2004,7 +2004,7 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
ReferenceProcessorMTDiscoveryMutator rp_mut_discovery(ref_processor(), false);
ref_processor()->set_enqueuing_is_done(false);
ref_processor()->enable_discovery();
ref_processor()->enable_discovery(false /*verify_disabled*/, false /*check_no_refs*/);
ref_processor()->setup_policy(clear_all_soft_refs);
// If an asynchronous collection finishes, the _modUnionTable is
// all clear. If we are assuming the collection from an asynchronous
......@@ -3490,8 +3490,8 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
MutexLockerEx x(bitMapLock(),
Mutex::_no_safepoint_check_flag);
checkpointRootsInitialWork(asynch);
rp->verify_no_references_recorded();
rp->enable_discovery(); // enable ("weak") refs discovery
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, true /*check_no_refs*/);
_collectorState = Marking;
} else {
// (Weak) Refs discovery: this is controlled from genCollectedHeap::do_collection
......@@ -3503,7 +3503,8 @@ void CMSCollector::checkpointRootsInitial(bool asynch) {
"ref discovery for this generation kind");
// already have locks
checkpointRootsInitialWork(asynch);
rp->enable_discovery(); // now enable ("weak") refs discovery
// now enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, false /*verify_no_refs*/);
_collectorState = Marking;
}
SpecializationStats::print();
......
......@@ -818,10 +818,10 @@ void ConcurrentMark::checkpointRootsInitialPost() {
NoteStartOfMarkHRClosure startcl;
g1h->heap_region_iterate(&startcl);
// Start weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor();
rp->verify_no_references_recorded();
rp->enable_discovery(); // enable ("weak") refs discovery
// Start Concurrent Marking weak-reference discovery.
ReferenceProcessor* rp = g1h->ref_processor_cm();
// enable ("weak") refs discovery
rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
......@@ -1133,6 +1133,7 @@ void ConcurrentMark::checkpointRootsFinal(bool clear_all_soft_refs) {
// world is stopped at this checkpoint
assert(SafepointSynchronize::is_at_safepoint(),
"world should be stopped");
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// If a full collection has happened, we shouldn't do this.
......@@ -1837,6 +1838,10 @@ void ConcurrentMark::cleanup() {
size_t cleaned_up_bytes = start_used_bytes - g1h->used();
g1p->decrease_known_garbage_bytes(cleaned_up_bytes);
// Clean up will have freed any regions completely full of garbage.
// Update the soft reference policy with the new heap occupancy.
Universe::update_heap_info_at_gc();
// We need to make this be a "collection" so any collection pause that
// races with it goes around and waits for completeCleanup to finish.
g1h->increment_total_collections();
......@@ -2072,8 +2077,10 @@ class G1CMParDrainMarkingStackClosure: public VoidClosure {
}
};
// Implementation of AbstractRefProcTaskExecutor for G1
class G1RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
// Implementation of AbstractRefProcTaskExecutor for parallel
// reference processing at the end of G1 concurrent marking
class G1CMRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
G1CollectedHeap* _g1h;
ConcurrentMark* _cm;
......@@ -2082,7 +2089,7 @@ private:
int _active_workers;
public:
G1RefProcTaskExecutor(G1CollectedHeap* g1h,
G1CMRefProcTaskExecutor(G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMBitMap* bitmap,
WorkGang* workers,
......@@ -2096,7 +2103,7 @@ public:
virtual void execute(EnqueueTask& task);
};
class G1RefProcTaskProxy: public AbstractGangTask {
class G1CMRefProcTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
ProcessTask& _proc_task;
G1CollectedHeap* _g1h;
......@@ -2104,7 +2111,7 @@ class G1RefProcTaskProxy: public AbstractGangTask {
CMBitMap* _bitmap;
public:
G1RefProcTaskProxy(ProcessTask& proc_task,
G1CMRefProcTaskProxy(ProcessTask& proc_task,
G1CollectedHeap* g1h,
ConcurrentMark* cm,
CMBitMap* bitmap) :
......@@ -2122,10 +2129,10 @@ public:
}
};
void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
void G1CMRefProcTaskExecutor::execute(ProcessTask& proc_task) {
assert(_workers != NULL, "Need parallel worker threads.");
G1RefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
G1CMRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _cm, _bitmap);
// We need to reset the phase for each task execution so that
// the termination protocol of CMTask::do_marking_step works.
......@@ -2135,12 +2142,12 @@ void G1RefProcTaskExecutor::execute(ProcessTask& proc_task) {
_g1h->set_par_threads(0);
}
class G1RefEnqueueTaskProxy: public AbstractGangTask {
class G1CMRefEnqueueTaskProxy: public AbstractGangTask {
typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
EnqueueTask& _enq_task;
public:
G1RefEnqueueTaskProxy(EnqueueTask& enq_task) :
G1CMRefEnqueueTaskProxy(EnqueueTask& enq_task) :
AbstractGangTask("Enqueue reference objects in parallel"),
_enq_task(enq_task)
{ }
......@@ -2150,10 +2157,10 @@ public:
}
};
void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
void G1CMRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
assert(_workers != NULL, "Need parallel worker threads.");
G1RefEnqueueTaskProxy enq_task_proxy(enq_task);
G1CMRefEnqueueTaskProxy enq_task_proxy(enq_task);
_g1h->set_par_threads(_active_workers);
_workers->run_task(&enq_task_proxy);
......@@ -2163,71 +2170,84 @@ void G1RefProcTaskExecutor::execute(EnqueueTask& enq_task) {
void ConcurrentMark::weakRefsWork(bool clear_all_soft_refs) {
ResourceMark rm;
HandleMark hm;
G1CollectedHeap* g1h = G1CollectedHeap::heap();
ReferenceProcessor* rp = g1h->ref_processor();
// See the comment in G1CollectedHeap::ref_processing_init()
// about how reference processing currently works in G1.
// Process weak references.
rp->setup_policy(clear_all_soft_refs);
assert(_markStack.isEmpty(), "mark stack should be empty");
G1CMIsAliveClosure g1_is_alive(g1h);
G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
G1CMDrainMarkingStackClosure
g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
// We use the work gang from the G1CollectedHeap and we utilize all
// the worker threads.
int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
G1RefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
g1h->workers(), active_workers);
if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_active_mt_degree(active_workers);
rp->process_discovered_references(&g1_is_alive,
G1CollectedHeap* g1h = G1CollectedHeap::heap();
// Is alive closure.
G1CMIsAliveClosure g1_is_alive(g1h);
// Inner scope to exclude the cleaning of the string and symbol
// tables from the displayed time.
{
bool verbose = PrintGC && PrintGCDetails;
if (verbose) {
gclog_or_tty->put(' ');
}
TraceTime t("GC ref-proc", verbose, false, gclog_or_tty);
ReferenceProcessor* rp = g1h->ref_processor_cm();
// See the comment in G1CollectedHeap::ref_processing_init()
// about how reference processing currently works in G1.
// Process weak references.
rp->setup_policy(clear_all_soft_refs);
assert(_markStack.isEmpty(), "mark stack should be empty");
G1CMKeepAliveClosure g1_keep_alive(g1h, this, nextMarkBitMap());
G1CMDrainMarkingStackClosure
g1_drain_mark_stack(nextMarkBitMap(), &_markStack, &g1_keep_alive);
// We use the work gang from the G1CollectedHeap and we utilize all
// the worker threads.
int active_workers = g1h->workers() ? g1h->workers()->total_workers() : 1;
active_workers = MAX2(MIN2(active_workers, (int)_max_task_num), 1);
G1CMRefProcTaskExecutor par_task_executor(g1h, this, nextMarkBitMap(),
g1h->workers(), active_workers);
if (rp->processing_is_mt()) {
// Set the degree of MT here. If the discovery is done MT, there
// may have been a different number of threads doing the discovery
// and a different number of discovered lists may have Ref objects.
// That is OK as long as the Reference lists are balanced (see
// balance_all_queues() and balance_queues()).
rp->set_active_mt_degree(active_workers);
rp->process_discovered_references(&g1_is_alive,
&g1_keep_alive,
&g1_drain_mark_stack,
&par_task_executor);
// The work routines of the parallel keep_alive and drain_marking_stack
// will set the has_overflown flag if we overflow the global marking
// stack.
} else {
rp->process_discovered_references(&g1_is_alive,
&g1_keep_alive,
&g1_drain_mark_stack,
NULL);
// The work routines of the parallel keep_alive and drain_marking_stack
// will set the has_overflown flag if we overflow the global marking
// stack.
} else {
rp->process_discovered_references(&g1_is_alive,
&g1_keep_alive,
&g1_drain_mark_stack,
NULL);
}
}
assert(_markStack.overflow() || _markStack.isEmpty(),
"mark stack should be empty (unless it overflowed)");
if (_markStack.overflow()) {
// Should have been done already when we tried to push an
// entry on to the global mark stack. But let's do it again.
set_has_overflown();
}
assert(_markStack.overflow() || _markStack.isEmpty(),
"mark stack should be empty (unless it overflowed)");
if (_markStack.overflow()) {
// Should have been done already when we tried to push an
// entry on to the global mark stack. But let's do it again.
set_has_overflown();
}
if (rp->processing_is_mt()) {
assert(rp->num_q() == active_workers, "why not");
rp->enqueue_discovered_references(&par_task_executor);
} else {
rp->enqueue_discovered_references();
}
if (rp->processing_is_mt()) {
assert(rp->num_q() == active_workers, "why not");
rp->enqueue_discovered_references(&par_task_executor);
} else {
rp->enqueue_discovered_references();
rp->verify_no_references_recorded();
assert(!rp->discovery_enabled(), "Post condition");
}
rp->verify_no_references_recorded();
assert(!rp->discovery_enabled(), "should have been disabled");
// Now clean up stale oops in StringTable
StringTable::unlink(&g1_is_alive);
// Clean up unreferenced symbols in symbol table.
......@@ -3329,7 +3349,7 @@ G1CMOopClosure::G1CMOopClosure(G1CollectedHeap* g1h,
assert(_ref_processor == NULL, "should be initialized to NULL");
if (G1UseConcMarkReferenceProcessing) {
_ref_processor = g1h->ref_processor();
_ref_processor = g1h->ref_processor_cm();
assert(_ref_processor != NULL, "should not be NULL");
}
}
......@@ -4564,6 +4584,15 @@ G1PrintRegionLivenessInfoClosure(outputStream* out, const char* phase_name)
G1PPRL_DOUBLE_H_FORMAT,
"type", "address-range",
"used", "prev-live", "next-live", "gc-eff");
_out->print_cr(G1PPRL_LINE_PREFIX
G1PPRL_TYPE_H_FORMAT
G1PPRL_ADDR_BASE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_BYTE_H_FORMAT
G1PPRL_DOUBLE_H_FORMAT,
"", "",
"(bytes)", "(bytes)", "(bytes)", "(bytes/ms)");
}
// It takes as a parameter a reference to one of the _hum_* fields, it
......@@ -4575,7 +4604,7 @@ size_t G1PrintRegionLivenessInfoClosure::get_hum_bytes(size_t* hum_bytes) {
// The > 0 check is to deal with the prev and next live bytes which
// could be 0.
if (*hum_bytes > 0) {
bytes = MIN2((size_t) HeapRegion::GrainBytes, *hum_bytes);
bytes = MIN2(HeapRegion::GrainBytes, *hum_bytes);
*hum_bytes -= bytes;
}
return bytes;
......
......@@ -366,8 +366,8 @@ class ConcurrentMark: public CHeapObj {
friend class CMConcurrentMarkingTask;
friend class G1ParNoteEndTask;
friend class CalcLiveObjectsClosure;
friend class G1RefProcTaskProxy;
friend class G1RefProcTaskExecutor;
friend class G1CMRefProcTaskProxy;
friend class G1CMRefProcTaskExecutor;
friend class G1CMParKeepAliveAndDrainClosure;
friend class G1CMParDrainMarkingStackClosure;
......
......@@ -155,6 +155,19 @@ public:
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
// reference processor. It is also extensively used during
// reference processing during STW evacuation pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {
G1CollectedHeap* _g1;
public:
G1STWIsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
void do_object(oop p) { assert(false, "Do not call."); }
bool do_object_b(oop p);
};
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
......@@ -174,6 +187,7 @@ public:
};
class RefineCardTableEntryClosure;
class G1CollectedHeap : public SharedHeap {
friend class VM_G1CollectForAllocation;
friend class VM_GenCollectForPermanentAllocation;
......@@ -573,9 +587,20 @@ protected:
// allocated block, or else "NULL".
HeapWord* expand_and_allocate(size_t word_size);
// Process any reference objects discovered during
// an incremental evacuation pause.
void process_discovered_references();
// Enqueue any remaining discovered references
// after processing.
void enqueue_discovered_references();
public:
G1MonitoringSupport* g1mm() { return _g1mm; }
G1MonitoringSupport* g1mm() {
assert(_g1mm != NULL, "should have been initialized");
return _g1mm;
}
// Expand the garbage-first heap by at least the given size (in bytes!).
// Returns true if the heap was expanded by the requested amount;
......@@ -822,17 +847,87 @@ protected:
void finalize_for_evac_failure();
// An attempt to evacuate "obj" has failed; take necessary steps.
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj);
oop handle_evacuation_failure_par(OopsInHeapRegionClosure* cl, oop obj,
bool should_mark_root);
void handle_evacuation_failure_common(oop obj, markOop m);
// Instance of the concurrent mark is_alive closure for embedding
// into the reference processor as the is_alive_non_header. This
// prevents unnecessary additions to the discovered lists during
// concurrent discovery.
G1CMIsAliveClosure _is_alive_closure;
// ("Weak") Reference processing support.
//
// G1 has 2 instances of the reference processor class. One
// (_ref_processor_cm) handles reference object discovery
// and subsequent processing during concurrent marking cycles.
//
// The other (_ref_processor_stw) handles reference object
// discovery and processing during full GCs and incremental
// evacuation pauses.
//
// During an incremental pause, reference discovery will be
// temporarily disabled for _ref_processor_cm and will be
// enabled for _ref_processor_stw. At the end of the evacuation
// pause references discovered by _ref_processor_stw will be
// processed and discovery will be disabled. The previous
// setting for reference object discovery for _ref_processor_cm
// will be re-instated.
//
// At the start of marking:
// * Discovery by the CM ref processor is verified to be inactive
// and its discovered lists are empty.
// * Discovery by the CM ref processor is then enabled.
//
// At the end of marking:
// * Any references on the CM ref processor's discovered
// lists are processed (possibly MT).
//
// At the start of full GC we:
// * Disable discovery by the CM ref processor and
// empty CM ref processor's discovered lists
// (without processing any entries).
// * Verify that the STW ref processor is inactive and its
// discovered lists are empty.
// * Temporarily set STW ref processor discovery as single threaded.
// * Temporarily clear the STW ref processor's _is_alive_non_header
// field.
// * Finally enable discovery by the STW ref processor.
//
// The STW ref processor is used to record any discovered
// references during the full GC.
//
// At the end of a full GC we:
// * Enqueue any reference objects discovered by the STW ref processor
// that have non-live referents. This has the side-effect of
// making the STW ref processor inactive by disabling discovery.
// * Verify that the CM ref processor is still inactive
// and no references have been placed on its discovered
// lists (also checked as a precondition during initial marking).
// The (stw) reference processor...
ReferenceProcessor* _ref_processor_stw;
// During reference object discovery, the _is_alive_non_header
// closure (if non-null) is applied to the referent object to
// determine whether the referent is live. If so then the
// reference object does not need to be 'discovered' and can
// be treated as a regular oop. This has the benefit of reducing
// the number of 'discovered' reference objects that need to
// be processed.
//
// Instance of the is_alive closure for embedding into the
// STW reference processor as the _is_alive_non_header field.
// Supplying a value for the _is_alive_non_header field is
// optional but doing so prevents unnecessary additions to
// the discovered lists during reference discovery.
G1STWIsAliveClosure _is_alive_closure_stw;
// The (concurrent marking) reference processor...
ReferenceProcessor* _ref_processor_cm;
// ("Weak") Reference processing support
ReferenceProcessor* _ref_processor;
// Instance of the concurrent mark is_alive closure for embedding
// into the Concurrent Marking reference processor as the
// _is_alive_non_header field. Supplying a value for the
// _is_alive_non_header field is optional but doing so prevents
// unnecessary additions to the discovered lists during reference
// discovery.
G1CMIsAliveClosure _is_alive_closure_cm;
enum G1H_process_strong_roots_tasks {
G1H_PS_mark_stack_oops_do,
......@@ -873,6 +968,7 @@ public:
// specified by the policy object.
jint initialize();
// Initialize weak reference processing.
virtual void ref_processing_init();
void set_par_threads(int t) {
......@@ -924,8 +1020,13 @@ public:
// The shared block offset table array.
G1BlockOffsetSharedArray* bot_shared() const { return _bot_shared; }
// Reference Processing accessor
ReferenceProcessor* ref_processor() { return _ref_processor; }
// Reference Processing accessors
// The STW reference processor....
ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }
// The Concurrent Marking reference processor...
ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }
virtual size_t capacity() const;
virtual size_t used() const;
......
......@@ -152,8 +152,12 @@ G1CollectorPolicy::G1CollectorPolicy() :
_summary(new Summary()),
#ifndef PRODUCT
_cur_clear_ct_time_ms(0.0),
_cur_ref_proc_time_ms(0.0),
_cur_ref_enq_time_ms(0.0),
#ifndef PRODUCT
_min_clear_cc_time_ms(-1.0),
_max_clear_cc_time_ms(-1.0),
_cur_clear_cc_time_ms(0.0),
......@@ -294,10 +298,10 @@ G1CollectorPolicy::G1CollectorPolicy() :
}
// Verify PLAB sizes
const uint region_size = HeapRegion::GrainWords;
const size_t region_size = HeapRegion::GrainWords;
if (YoungPLABSize > region_size || OldPLABSize > region_size) {
char buffer[128];
jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most %u",
jio_snprintf(buffer, sizeof(buffer), "%sPLABSize should be at most "SIZE_FORMAT,
OldPLABSize > region_size ? "Old" : "Young", region_size);
vm_exit_during_initialization(buffer);
}
......@@ -459,15 +463,16 @@ void G1CollectorPolicy::initialize_flags() {
// ParallelScavengeHeap::initialize()). We might change this in the
// future, but it's a good start.
class G1YoungGenSizer : public TwoGenerationCollectorPolicy {
private:
size_t size_to_region_num(size_t byte_size) {
return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
}
public:
G1YoungGenSizer() {
initialize_flags();
initialize_size_info();
}
size_t size_to_region_num(size_t byte_size) {
return MAX2((size_t) 1, byte_size / HeapRegion::GrainBytes);
}
size_t min_young_region_num() {
return size_to_region_num(_min_gen0_size);
}
......@@ -501,11 +506,10 @@ void G1CollectorPolicy::init() {
if (FLAG_IS_CMDLINE(NewRatio)) {
if (FLAG_IS_CMDLINE(NewSize) || FLAG_IS_CMDLINE(MaxNewSize)) {
gclog_or_tty->print_cr("-XX:NewSize and -XX:MaxNewSize overrides -XX:NewRatio");
warning("-XX:NewSize and -XX:MaxNewSize override -XX:NewRatio");
} else {
// Treat NewRatio as a fixed size that is only recalculated when the heap size changes
size_t heap_regions = sizer.size_to_region_num(_g1->n_regions());
update_young_list_size_using_newratio(heap_regions);
update_young_list_size_using_newratio(_g1->n_regions());
_using_new_ratio_calculations = true;
}
}
......@@ -1479,6 +1483,8 @@ void G1CollectorPolicy::record_collection_pause_end() {
#endif
print_stats(1, "Other", other_time_ms);
print_stats(2, "Choose CSet", _recorded_young_cset_choice_time_ms);
print_stats(2, "Ref Proc", _cur_ref_proc_time_ms);
print_stats(2, "Ref Enq", _cur_ref_enq_time_ms);
for (int i = 0; i < _aux_num; ++i) {
if (_cur_aux_times_set[i]) {
......@@ -1519,11 +1525,17 @@ void G1CollectorPolicy::record_collection_pause_end() {
}
if (_last_full_young_gc) {
ergo_verbose2(ErgoPartiallyYoungGCs,
"start partially-young GCs",
ergo_format_byte_perc("known garbage"),
_known_garbage_bytes, _known_garbage_ratio * 100.0);
set_full_young_gcs(false);
if (!last_pause_included_initial_mark) {
ergo_verbose2(ErgoPartiallyYoungGCs,
"start partially-young GCs",
ergo_format_byte_perc("known garbage"),
_known_garbage_bytes, _known_garbage_ratio * 100.0);
set_full_young_gcs(false);
} else {
ergo_verbose0(ErgoPartiallyYoungGCs,
"do not start partially-young GCs",
ergo_format_reason("concurrent cycle is about to start"));
}
_last_full_young_gc = false;
}
......@@ -2485,6 +2497,13 @@ G1CollectorPolicy::decide_on_conc_mark_initiation() {
// initiate a new cycle.
set_during_initial_mark_pause();
// We do not allow non-full young GCs during marking.
if (!full_young_gcs()) {
set_full_young_gcs(true);
ergo_verbose0(ErgoPartiallyYoungGCs,
"end partially-young GCs",
ergo_format_reason("concurrent cycle is about to start"));
}
// And we can now clear initiate_conc_mark_if_possible() as
// we've already acted on it.
......
......@@ -119,6 +119,8 @@ protected:
double _cur_satb_drain_time_ms;
double _cur_clear_ct_time_ms;
bool _satb_drain_time_set;
double _cur_ref_proc_time_ms;
double _cur_ref_enq_time_ms;
#ifndef PRODUCT
// Card Table Count Cache stats
......@@ -986,6 +988,14 @@ public:
_cur_aux_times_ms[i] += ms;
}
void record_ref_proc_time(double ms) {
_cur_ref_proc_time_ms = ms;
}
void record_ref_enq_time(double ms) {
_cur_ref_enq_time_ms = ms;
}
#ifndef PRODUCT
void record_cc_clear_time(double ms) {
if (_min_clear_cc_time_ms < 0.0 || ms <= _min_clear_cc_time_ms)
......@@ -1139,6 +1149,10 @@ public:
return young_list_length < young_list_max_length;
}
size_t young_list_max_length() {
return _young_list_max_length;
}
void update_region_num(bool young);
bool full_young_gcs() {
......
......@@ -62,6 +62,8 @@ void G1MarkSweep::invoke_at_safepoint(ReferenceProcessor* rp,
// hook up weak ref data so it can be used during Mark-Sweep
assert(GenMarkSweep::ref_processor() == NULL, "no stomping");
assert(rp != NULL, "should be non-NULL");
assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Precondition");
GenMarkSweep::_ref_processor = rp;
rp->setup_policy(clear_all_softrefs);
......@@ -139,6 +141,8 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
// Process reference objects found during marking
ReferenceProcessor* rp = GenMarkSweep::ref_processor();
assert(rp == G1CollectedHeap::heap()->ref_processor_stw(), "Sanity");
rp->setup_policy(clear_all_softrefs);
rp->process_discovered_references(&GenMarkSweep::is_alive,
&GenMarkSweep::keep_alive,
......@@ -166,7 +170,6 @@ void G1MarkSweep::mark_sweep_phase1(bool& marked_for_unloading,
GenMarkSweep::follow_mdo_weak_refs();
assert(GenMarkSweep::_marking_stack.is_empty(), "just drained");
// Visit interned string tables and delete unmarked oops
StringTable::unlink(&GenMarkSweep::is_alive);
// Clean up unreferenced symbols in symbol table.
......@@ -346,7 +349,8 @@ void G1MarkSweep::mark_sweep_phase3() {
NULL, // do not touch code cache here
&GenMarkSweep::adjust_pointer_closure);
g1h->ref_processor()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
assert(GenMarkSweep::ref_processor() == g1h->ref_processor_stw(), "Sanity");
g1h->ref_processor_stw()->weak_oops_do(&GenMarkSweep::adjust_root_pointer_closure);
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)
......
......@@ -27,19 +27,69 @@
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
VirtualSpace* g1_storage_addr) :
G1GenerationCounters::G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name,
int ordinal, int spaces,
size_t min_capacity,
size_t max_capacity,
size_t curr_capacity)
: GenerationCounters(name, ordinal, spaces, min_capacity,
max_capacity, curr_capacity), _g1mm(g1mm) { }
// We pad the capacity three times given that the young generation
// contains three spaces (eden and two survivors).
G1YoungGenerationCounters::G1YoungGenerationCounters(G1MonitoringSupport* g1mm,
const char* name)
: G1GenerationCounters(g1mm, name, 0 /* ordinal */, 3 /* spaces */,
G1MonitoringSupport::pad_capacity(0, 3) /* min_capacity */,
G1MonitoringSupport::pad_capacity(g1mm->young_gen_max(), 3),
G1MonitoringSupport::pad_capacity(0, 3) /* curr_capacity */) {
update_all();
}
G1OldGenerationCounters::G1OldGenerationCounters(G1MonitoringSupport* g1mm,
const char* name)
: G1GenerationCounters(g1mm, name, 1 /* ordinal */, 1 /* spaces */,
G1MonitoringSupport::pad_capacity(0) /* min_capacity */,
G1MonitoringSupport::pad_capacity(g1mm->old_gen_max()),
G1MonitoringSupport::pad_capacity(0) /* curr_capacity */) {
update_all();
}
void G1YoungGenerationCounters::update_all() {
size_t committed =
G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
_current_size->set_value(committed);
}
void G1OldGenerationCounters::update_all() {
size_t committed =
G1MonitoringSupport::pad_capacity(_g1mm->old_gen_committed());
_current_size->set_value(committed);
}
G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h) :
_g1h(g1h),
_incremental_collection_counters(NULL),
_full_collection_counters(NULL),
_non_young_collection_counters(NULL),
_old_collection_counters(NULL),
_old_space_counters(NULL),
_young_collection_counters(NULL),
_eden_counters(NULL),
_from_counters(NULL),
_to_counters(NULL),
_g1_storage_addr(g1_storage_addr)
{
_overall_reserved(0),
_overall_committed(0), _overall_used(0),
_young_region_num(0),
_young_gen_committed(0),
_eden_committed(0), _eden_used(0),
_survivor_committed(0), _survivor_used(0),
_old_committed(0), _old_used(0) {
_overall_reserved = g1h->max_capacity();
recalculate_sizes();
// Counters for GC collections
//
// name "collector.0". In a generational collector this would be the
......@@ -69,110 +119,147 @@ G1MonitoringSupport::G1MonitoringSupport(G1CollectedHeap* g1h,
// generational GC terms. The "1, 1" parameters are for
// the n-th generation (=1) with 1 space.
// Counters are created from minCapacity, maxCapacity, and capacity
_non_young_collection_counters =
new GenerationCounters("whole heap", 1, 1, _g1_storage_addr);
_old_collection_counters = new G1OldGenerationCounters(this, "old");
// name "generation.1.space.0"
// Counters are created from maxCapacity, capacity, initCapacity,
// and used.
_old_space_counters = new HSpaceCounters("space", 0,
_g1h->max_capacity(), _g1h->capacity(), _non_young_collection_counters);
_old_space_counters = new HSpaceCounters("space", 0 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
pad_capacity(old_space_committed()) /* init_capacity */,
_old_collection_counters);
// Young collection set
// name "generation.0". This is logically the young generation.
// The "0, 3" are paremeters for the n-th genertaion (=0) with 3 spaces.
// See _non_young_collection_counters for additional counters
_young_collection_counters = new GenerationCounters("young", 0, 3, NULL);
// See _old_collection_counters for additional counters
_young_collection_counters = new G1YoungGenerationCounters(this, "young");
// Replace "max_heap_byte_size() with maximum young gen size for
// g1Collectedheap
// name "generation.0.space.0"
// See _old_space_counters for additional counters
_eden_counters = new HSpaceCounters("eden", 0,
_g1h->max_capacity(), eden_space_committed(),
_eden_counters = new HSpaceCounters("eden", 0 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
pad_capacity(eden_space_committed()) /* init_capacity */,
_young_collection_counters);
// name "generation.0.space.1"
// See _old_space_counters for additional counters
// Set the arguments to indicate that this survivor space is not used.
_from_counters = new HSpaceCounters("s0", 1, (long) 0, (long) 0,
_from_counters = new HSpaceCounters("s0", 1 /* ordinal */,
pad_capacity(0) /* max_capacity */,
pad_capacity(0) /* init_capacity */,
_young_collection_counters);
// Given that this survivor space is not used, we update it here
// once to reflect that its used space is 0 so that we don't have to
// worry about updating it again later.
_from_counters->update_used(0);
// name "generation.0.space.2"
// See _old_space_counters for additional counters
_to_counters = new HSpaceCounters("s1", 2,
_g1h->max_capacity(),
survivor_space_committed(),
_to_counters = new HSpaceCounters("s1", 2 /* ordinal */,
pad_capacity(overall_reserved()) /* max_capacity */,
pad_capacity(survivor_space_committed()) /* init_capacity */,
_young_collection_counters);
}
size_t G1MonitoringSupport::overall_committed() {
return g1h()->capacity();
}
void G1MonitoringSupport::recalculate_sizes() {
G1CollectedHeap* g1 = g1h();
size_t G1MonitoringSupport::overall_used() {
return g1h()->used_unlocked();
}
// Recalculate all the sizes from scratch. We assume that this is
// called at a point where no concurrent updates to the various
// values we read here are possible (i.e., at a STW phase at the end
// of a GC).
size_t G1MonitoringSupport::eden_space_committed() {
return MAX2(eden_space_used(), (size_t) HeapRegion::GrainBytes);
}
size_t young_list_length = g1->young_list()->length();
size_t survivor_list_length = g1->g1_policy()->recorded_survivor_regions();
assert(young_list_length >= survivor_list_length, "invariant");
size_t eden_list_length = young_list_length - survivor_list_length;
// Max length includes any potential extensions to the young gen
// we'll do when the GC locker is active.
size_t young_list_max_length = g1->g1_policy()->young_list_max_length();
assert(young_list_max_length >= survivor_list_length, "invariant");
size_t eden_list_max_length = young_list_max_length - survivor_list_length;
size_t G1MonitoringSupport::eden_space_used() {
size_t young_list_length = g1h()->young_list()->length();
size_t eden_used = young_list_length * HeapRegion::GrainBytes;
size_t survivor_used = survivor_space_used();
eden_used = subtract_up_to_zero(eden_used, survivor_used);
return eden_used;
}
_overall_used = g1->used_unlocked();
_eden_used = eden_list_length * HeapRegion::GrainBytes;
_survivor_used = survivor_list_length * HeapRegion::GrainBytes;
_young_region_num = young_list_length;
_old_used = subtract_up_to_zero(_overall_used, _eden_used + _survivor_used);
size_t G1MonitoringSupport::survivor_space_committed() {
return MAX2(survivor_space_used(),
(size_t) HeapRegion::GrainBytes);
}
// First calculate the committed sizes that can be calculated independently.
_survivor_committed = _survivor_used;
_old_committed = HeapRegion::align_up_to_region_byte_size(_old_used);
size_t G1MonitoringSupport::survivor_space_used() {
size_t survivor_num = g1h()->g1_policy()->recorded_survivor_regions();
size_t survivor_used = survivor_num * HeapRegion::GrainBytes;
return survivor_used;
}
// Next, start with the overall committed size.
_overall_committed = g1->capacity();
size_t committed = _overall_committed;
size_t G1MonitoringSupport::old_space_committed() {
size_t committed = overall_committed();
size_t eden_committed = eden_space_committed();
size_t survivor_committed = survivor_space_committed();
committed = subtract_up_to_zero(committed, eden_committed);
committed = subtract_up_to_zero(committed, survivor_committed);
committed = MAX2(committed, (size_t) HeapRegion::GrainBytes);
return committed;
// Remove the committed size we have calculated so far (for the
// survivor and old space).
assert(committed >= (_survivor_committed + _old_committed), "sanity");
committed -= _survivor_committed + _old_committed;
// Next, calculate and remove the committed size for the eden.
_eden_committed = eden_list_max_length * HeapRegion::GrainBytes;
// Somewhat defensive: be robust in case there are inaccuracies in
// the calculations
_eden_committed = MIN2(_eden_committed, committed);
committed -= _eden_committed;
// Finally, give the rest to the old space...
_old_committed += committed;
// ..and calculate the young gen committed.
_young_gen_committed = _eden_committed + _survivor_committed;
assert(_overall_committed ==
(_eden_committed + _survivor_committed + _old_committed),
"the committed sizes should add up");
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed);
// _survivor_committed and _old_committed are calculated in terms of
// the corresponding _*_used value, so the next two conditions
// should hold.
assert(_survivor_used <= _survivor_committed, "post-condition");
assert(_old_used <= _old_committed, "post-condition");
}
// See the comment near the top of g1MonitoringSupport.hpp for
// an explanation of these calculations for "used" and "capacity".
size_t G1MonitoringSupport::old_space_used() {
size_t used = overall_used();
size_t eden_used = eden_space_used();
size_t survivor_used = survivor_space_used();
used = subtract_up_to_zero(used, eden_used);
used = subtract_up_to_zero(used, survivor_used);
return used;
void G1MonitoringSupport::recalculate_eden_size() {
G1CollectedHeap* g1 = g1h();
// When a new eden region is allocated, only the eden_used size is
// affected (since we have recalculated everything else at the last GC).
size_t young_region_num = g1h()->young_list()->length();
if (young_region_num > _young_region_num) {
size_t diff = young_region_num - _young_region_num;
_eden_used += diff * HeapRegion::GrainBytes;
// Somewhat defensive: cap the eden used size to make sure it
// never exceeds the committed size.
_eden_used = MIN2(_eden_used, _eden_committed);
_young_region_num = young_region_num;
}
}
void G1MonitoringSupport::update_counters() {
void G1MonitoringSupport::update_sizes() {
  recalculate_sizes();
  if (UsePerfData) {
    eden_counters()->update_capacity(eden_space_committed());
    eden_counters()->update_capacity(pad_capacity(eden_space_committed()));
    eden_counters()->update_used(eden_space_used());
    // only the to survivor space (s1) is active, so we don't need to
    // update the counters for the from survivor space (s0)
    to_counters()->update_capacity(survivor_space_committed());
    to_counters()->update_capacity(pad_capacity(survivor_space_committed()));
    to_counters()->update_used(survivor_space_used());
    old_space_counters()->update_capacity(old_space_committed());
    old_space_counters()->update_capacity(pad_capacity(old_space_committed()));
    old_space_counters()->update_used(old_space_used());
    non_young_collection_counters()->update_all();
    old_collection_counters()->update_all();
    young_collection_counters()->update_all();
  }
}
void G1MonitoringSupport::update_eden_counters() {
void G1MonitoringSupport::update_eden_size() {
recalculate_eden_size();
if (UsePerfData) {
eden_counters()->update_capacity(eden_space_committed());
eden_counters()->update_used(eden_space_used());
}
}
......@@ -28,101 +28,95 @@
#include "gc_implementation/shared/hSpaceCounters.hpp"
class G1CollectedHeap;
class G1SpaceMonitoringSupport;
// Class for monitoring logical spaces in G1.
// G1 defines a set of regions as a young
// collection (analogous to a young generation).
// The young collection is a logical generation
// with no fixed chunk (see space.hpp) reflecting
// the address space for the generation. In addition
// to the young collection there is its complement
// the non-young collection that is simply the regions
// not in the young collection. The non-young collection
// is treated here as a logical old generation only
// because the monitoring tools expect a generational
// heap. The monitoring tools expect that a Space
// (see space.hpp) exists that describes the
// address space of the young collection and non-young
// collection, and such a view is provided here.
//
// This class provides interfaces to access
// the value of variables for the young collection
// that include the "capacity" and "used" of the
// young collection along with constant values
// for the minimum and maximum capacities for
// the logical spaces. Similarly for the non-young
// collection.
//
// Also provided are counters for G1 concurrent collections
// and stop-the-world full heap collections.
//
// Below is a description of how "used" and "capacity"
// (or committed) is calculated for the logical spaces.
//
// 1) The used space calculation for a pool is not necessarily
// independent of the others. We can easily get from G1 the overall
// used space in the entire heap, the number of regions in the young
// generation (includes both eden and survivors), and the number of
// survivor regions. So, from that we calculate:
//
// survivor_used = survivor_num * region_size
// eden_used = young_region_num * region_size - survivor_used
// old_gen_used = overall_used - eden_used - survivor_used
//
// Note that survivor_used and eden_used are upper bounds. To get the
// actual value we would have to iterate over the regions and add up
// ->used(). But that'd be expensive. So, we'll accept some lack of
// accuracy for those two. But, we have to be careful when calculating
// old_gen_used, in case we subtract from overall_used more than the
// actual number and our result goes negative.
//
// 2) Calculating the used space is straightforward, as described
// above. However, how do we calculate the committed space, given that
// we allocate space for the eden, survivor, and old gen out of the
// same pool of regions? One way to do this is to use the used value
// as also the committed value for the eden and survivor spaces and
// then calculate the old gen committed space as follows:
//
// old_gen_committed = overall_committed - eden_committed - survivor_committed
//
// Maybe a better way to do that would be to calculate used for eden
// and survivor as a sum of ->used() over their regions and then
// calculate committed as region_num * region_size (i.e., what we use
// to calculate the used space now). This is something to consider
// in the future.
//
// 3) Another decision that is again not straightforward is what is
// the max size that each memory pool can grow to. One way to do this
// would be to use the committed size for the max for the eden and
// survivors and calculate the old gen max as follows (basically, it's
// a similar pattern to what we use for the committed space, as
// described above):
//
// old_gen_max = overall_max - eden_max - survivor_max
//
// Unfortunately, the above makes the max of each pool fluctuate over
// time and, even though this is allowed according to the spec, it
// broke several assumptions in the M&M framework (there were cases
// where used would reach a value greater than max). So, for max we
// use -1, which means "undefined" according to the spec.
//
// 4) Now, there is a very subtle issue with all the above. The
// framework will call get_memory_usage() on the three pools
// asynchronously. As a result, each call might get a different value
// for, say, survivor_num which will yield inconsistent values for
// eden_used, survivor_used, and old_gen_used (as survivor_num is used
// in the calculation of all three). This would normally be
// ok. However, it's possible that this might cause the sum of
// eden_used, survivor_used, and old_gen_used to go over the max heap
// size and this seems to sometimes cause JConsole (and maybe other
// clients) to get confused. There isn't really an easy / clean
// solution to this problem, due to the asynchronous nature of the
// framework.
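//
// A quick worked instance of (1) above (illustrative numbers only,
// assuming 1M regions): with overall_used = 50M, young_region_num = 10,
// and survivor_num = 2:
//
//   survivor_used = 2 * 1M        =  2M
//   eden_used     = 10 * 1M - 2M  =  8M
//   old_gen_used  = 50M - 8M - 2M = 40M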
// Class for monitoring logical spaces in G1. It provides data for
// both G1's jstat counters as well as G1's memory pools.
//
// G1 splits the heap into heap regions and each heap region belongs
// to one of the following categories:
//
// * eden : regions that have been allocated since the last GC
// * survivors : regions with objects that survived the last few GCs
// * old : long-lived non-humongous regions
// * humongous : humongous regions
// * free : free regions
//
// The combination of eden and survivor regions form the equivalent of
// the young generation in the other GCs. The combination of old and
// humongous regions form the equivalent of the old generation in the
// other GCs. Free regions do not have a good equivalent in the other
// GCs given that they can be allocated as any of the other region types.
//
// The monitoring tools expect the heap to contain a number of
// generations (young, old, perm) and each generation to contain a
// number of spaces (young: eden, survivors, old). Given that G1 does
// not maintain those spaces physically (e.g., the set of
// non-contiguous eden regions can be considered as a "logical"
// space), we'll provide the illusion that those generations and
// spaces exist. In reality, each generation and space refers to a set
// of heap regions that are potentially non-contiguous.
//
// This class provides interfaces to access the min, current, and max
// capacity and current occupancy for each of G1's logical spaces and
// generations we expose to the monitoring tools. Also provided are
// counters for G1 concurrent collections and stop-the-world full heap
// collections.
//
// Below is a description of how the various sizes are calculated.
//
// * Current Capacity
//
// - heap_capacity = current heap capacity (e.g., current committed size)
// - young_gen_capacity = current max young gen target capacity
// (i.e., young gen target capacity + max allowed expansion capacity)
// - survivor_capacity = current survivor region capacity
// - eden_capacity = young_gen_capacity - survivor_capacity
// - old_capacity = heap_capacity - young_gen_capacity
//
// What we do in the above is to distribute the free regions among
// eden_capacity and old_capacity.
//
// * Occupancy
//
// - young_gen_used = current young region capacity
// - survivor_used = survivor_capacity
// - eden_used = young_gen_used - survivor_used
// - old_used = overall_used - young_gen_used
//
// Unfortunately, we currently only keep track of the number of
// currently allocated young and survivor regions + the overall used
// bytes in the heap, so the above can be a little inaccurate.
//
// * Min Capacity
//
// We set this to 0 for all spaces. We could consider setting the old
// min capacity to the min capacity of the heap (see 7078465).
//
// * Max Capacity
//
// For jstat, we set the max capacity of all spaces to heap_capacity,
// given that we don't always have a reasonable upper bound on how big
// each space can grow. For the memory pools, we actually make the max
// capacity undefined. We could consider setting the old max capacity
// to the max capacity of the heap (see 7078465).
//
// If we had more accurate occupancy / capacity information per
// region set, the above calculations would be greatly simplified and
// made more accurate.
//
// We update all the above synchronously and we store the results in
// fields so that we just read said fields when needed. A subtle point
// is that all the above sizes need to be recalculated when the old
// gen changes capacity (after a GC or after a humongous allocation)
// but only the eden occupancy changes when a new eden region is
// allocated. So, in the latter case we have minimal recalculation to
// do which is important as we want to keep the eden region allocation
// path as low-overhead as possible.
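//
// A worked instance of the capacity split above (illustrative numbers
// only, assuming 1M regions): with heap_capacity = 100M, a young gen
// target capacity of 30M, and 5 survivor regions:
//
//   survivor_capacity = 5 * 1M     =  5M
//   eden_capacity     = 30M -  5M  = 25M
//   old_capacity      = 100M - 30M = 70M
//
// i.e., every committed region is attributed to exactly one space.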
class G1MonitoringSupport : public CHeapObj {
friend class VMStructs;
G1CollectedHeap* _g1h;
VirtualSpace* _g1_storage_addr;
// jstat performance counters
// incremental collections both fully and partially young
......@@ -133,9 +127,9 @@ class G1MonitoringSupport : public CHeapObj {
// _from_counters, and _to_counters are associated with
// this "generational" counter.
GenerationCounters* _young_collection_counters;
// non-young collection set counters. The _old_space_counters
// old collection set counters. The _old_space_counters
// below are associated with this "generational" counter.
GenerationCounters* _non_young_collection_counters;
GenerationCounters* _old_collection_counters;
// Counters for the capacity and used for
// the whole heap
HSpaceCounters* _old_space_counters;
......@@ -145,6 +139,27 @@ class G1MonitoringSupport : public CHeapObj {
HSpaceCounters* _from_counters;
HSpaceCounters* _to_counters;
// When it's appropriate to recalculate the various sizes (at the
// end of a GC, when a new eden region is allocated, etc.) we store
// them here so that we can easily report them when needed and not
// have to recalculate them every time.
size_t _overall_reserved;
size_t _overall_committed;
size_t _overall_used;
size_t _young_region_num;
size_t _young_gen_committed;
size_t _eden_committed;
size_t _eden_used;
size_t _survivor_committed;
size_t _survivor_used;
size_t _old_committed;
size_t _old_used;
G1CollectedHeap* g1h() { return _g1h; }
// It returns x - y if x > y, 0 otherwise.
// As described in the comment above, some of the inputs to the
// calculations we have to do are obtained concurrently and hence
......@@ -160,15 +175,35 @@ class G1MonitoringSupport : public CHeapObj {
}
}
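// Given the contract stated above, subtract_up_to_zero() behaves as
// the following saturating subtraction (a sketch consistent with the
// stated contract, not necessarily the exact implementation):
//
//   static size_t subtract_up_to_zero(size_t x, size_t y) {
//     if (x > y) {
//       return x - y;
//     } else {
//       return 0;
//     }
//   }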
// Recalculate all the sizes.
void recalculate_sizes();
// Recalculate only what's necessary when a new eden region is allocated.
void recalculate_eden_size();
public:
G1MonitoringSupport(G1CollectedHeap* g1h, VirtualSpace* g1_storage_addr);
G1MonitoringSupport(G1CollectedHeap* g1h);
G1CollectedHeap* g1h() { return _g1h; }
VirtualSpace* g1_storage_addr() { return _g1_storage_addr; }
// Unfortunately, the jstat tool assumes that no space has 0
// capacity. In our case, given that each space is logical, it's
// possible that no regions will be allocated to it, hence to have 0
// capacity (e.g., if there are no survivor regions, the survivor
// space has 0 capacity). The way we deal with this is to always pad
// each capacity value we report to jstat by a very small amount to
// make sure that it's never zero. Given that we sometimes have to
// report a capacity of a generation that contains several spaces
// (e.g., young gen includes one eden, two survivor spaces), the
// mult parameter is provided in order to add the appropriate
// padding multiple times so that the capacities add up correctly.
static size_t pad_capacity(size_t size_bytes, size_t mult = 1) {
return size_bytes + MinObjAlignmentInBytes * mult;
}
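// For example, the capacity of a young gen that aggregates one eden
// and two survivor spaces would be reported as (an illustrative use of
// the mult parameter, along the lines described above):
//
//   pad_capacity(young_gen_committed(), 3 /* eden + 2 survivors */)
//
// so that the padded space capacities still sum to the padded
// generation capacity.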
// Performance Counter accessors
void update_counters();
void update_eden_counters();
// Recalculate all the sizes from scratch and update all the jstat
// counters accordingly.
void update_sizes();
// Recalculate only what's necessary when a new eden region is
// allocated and update any jstat counters that need to be updated.
void update_eden_size();
CollectorCounters* incremental_collection_counters() {
return _incremental_collection_counters;
......@@ -176,8 +211,11 @@ class G1MonitoringSupport : public CHeapObj {
CollectorCounters* full_collection_counters() {
return _full_collection_counters;
}
GenerationCounters* non_young_collection_counters() {
return _non_young_collection_counters;
GenerationCounters* young_collection_counters() {
return _young_collection_counters;
}
GenerationCounters* old_collection_counters() {
return _old_collection_counters;
}
HSpaceCounters* old_space_counters() { return _old_space_counters; }
HSpaceCounters* eden_counters() { return _eden_counters; }
......@@ -187,17 +225,45 @@ class G1MonitoringSupport : public CHeapObj {
// Monitoring support used by
// MemoryService
// jstat counters
size_t overall_committed();
size_t overall_used();
size_t eden_space_committed();
size_t eden_space_used();
size_t survivor_space_committed();
size_t survivor_space_used();
size_t old_space_committed();
size_t old_space_used();
size_t overall_reserved() { return _overall_reserved; }
size_t overall_committed() { return _overall_committed; }
size_t overall_used() { return _overall_used; }
size_t young_gen_committed() { return _young_gen_committed; }
size_t young_gen_max() { return overall_reserved(); }
size_t eden_space_committed() { return _eden_committed; }
size_t eden_space_used() { return _eden_used; }
size_t survivor_space_committed() { return _survivor_committed; }
size_t survivor_space_used() { return _survivor_used; }
size_t old_gen_committed() { return old_space_committed(); }
size_t old_gen_max() { return overall_reserved(); }
size_t old_space_committed() { return _old_committed; }
size_t old_space_used() { return _old_used; }
};
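// For instance, a G1 eden memory pool could report its usage via the
// accessors above (a hypothetical sketch, not the actual memory pool
// code, which lives elsewhere):
//
//   MemoryUsage G1EdenPool::get_memory_usage() {
//     size_t committed = _g1mm->eden_space_committed();
//     size_t used      = _g1mm->eden_space_used();
//     // max is deliberately undefined for the pools, as noted above
//     return MemoryUsage(initial_size(), used, committed, max_size());
//   }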
class G1GenerationCounters: public GenerationCounters {
protected:
G1MonitoringSupport* _g1mm;
public:
G1GenerationCounters(G1MonitoringSupport* g1mm,
const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
size_t curr_capacity);
};
class G1YoungGenerationCounters: public G1GenerationCounters {
public:
G1YoungGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
virtual void update_all();
};
class G1OldGenerationCounters: public G1GenerationCounters {
public:
G1OldGenerationCounters(G1MonitoringSupport* g1mm, const char* name);
virtual void update_all();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1MONITORINGSUPPORT_HPP
......@@ -34,6 +34,7 @@ class CMBitMap;
class CMMarkStack;
class G1ParScanThreadState;
class CMTask;
class ReferenceProcessor;
// A class that scans oops in a given heap region (much as OopsInGenClosure
// scans oops in a generation.)
......@@ -59,8 +60,10 @@ public:
class G1ParPushHeapRSClosure : public G1ParClosureSuper {
public:
G1ParPushHeapRSClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParPushHeapRSClosure(G1CollectedHeap* g1,
G1ParScanThreadState* par_scan_state):
G1ParClosureSuper(g1, par_scan_state) { }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
......@@ -68,8 +71,13 @@ public:
class G1ParScanClosure : public G1ParClosureSuper {
public:
G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state) { }
G1ParScanClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
G1ParClosureSuper(g1, par_scan_state)
{
assert(_ref_processor == NULL, "sanity");
_ref_processor = rp;
}
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
......@@ -92,9 +100,18 @@ template <class T> inline oop clear_partial_array_mask(T* ref) {
class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
G1ParScanClosure _scanner;
public:
G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state) { }
G1ParScanPartialArrayClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state, ReferenceProcessor* rp) :
G1ParClosureSuper(g1, par_scan_state), _scanner(g1, par_scan_state, rp)
{
assert(_ref_processor == NULL, "sanity");
}
G1ParScanClosure* scanner() {
return &_scanner;
}
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
......@@ -105,7 +122,8 @@ class G1ParCopyHelper : public G1ParClosureSuper {
G1ParScanClosure *_scanner;
protected:
template <class T> void mark_object(T* p);
oop copy_to_survivor_space(oop obj, bool should_mark_copy);
oop copy_to_survivor_space(oop obj, bool should_mark_root,
bool should_mark_copy);
public:
G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
G1ParScanClosure *scanner) :
......@@ -116,10 +134,20 @@ template<bool do_gen_barrier, G1Barrier barrier,
bool do_mark_object>
class G1ParCopyClosure : public G1ParCopyHelper {
G1ParScanClosure _scanner;
template <class T> void do_oop_work(T* p);
public:
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
_scanner(g1, par_scan_state), G1ParCopyHelper(g1, par_scan_state, &_scanner) { }
G1ParCopyClosure(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state,
ReferenceProcessor* rp) :
_scanner(g1, par_scan_state, rp),
G1ParCopyHelper(g1, par_scan_state, &_scanner)
{
assert(_ref_processor == NULL, "sanity");
}
G1ParScanClosure* scanner() { return &_scanner; }
template <class T> void do_oop_nv(T* p) {
do_oop_work(p);
}
......@@ -129,21 +157,25 @@ public:
typedef G1ParCopyClosure<false, G1BarrierNone, false> G1ParScanExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, false> G1ParScanPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
typedef G1ParCopyClosure<false, G1BarrierNone, true> G1ParScanAndMarkExtRootClosure;
typedef G1ParCopyClosure<true, G1BarrierNone, true> G1ParScanAndMarkPermClosure;
typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
// This is the only case when we set skip_cset_test. Basically, this
// closure is (should?) only be called directly while we're draining
// the overflow and task queues. In that case we know that the
// reference in question points into the collection set, otherwise we
// would not have pushed it on the queue. The following is defined in
// g1_specialized_oop_closures.hpp.
// typedef G1ParCopyClosure<false, G1BarrierEvac, false, true> G1ParScanHeapEvacClosure;
// We need a separate closure to handle references during evacuation
// failure processing, as we cannot assume that the reference already
// points into the collection set (like G1ParScanHeapEvacClosure does).
// The following closure types are no longer used but are retained
// for historical reasons:
// typedef G1ParCopyClosure<false, G1BarrierRS, false> G1ParScanHeapRSClosure;
// typedef G1ParCopyClosure<false, G1BarrierRS, true> G1ParScanAndMarkHeapRSClosure;
// The following closure type is defined in g1_specialized_oop_closures.hpp:
//
// typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacClosure;
// We use a separate closure to handle references during evacuation
// failure processing.
// We could have used another instance of G1ParScanHeapEvacClosure
// (since that closure no longer assumes that the references it
// handles point into the collection set).
typedef G1ParCopyClosure<false, G1BarrierEvac, false> G1ParScanHeapEvacFailureClosure;
class FilterIntoCSClosure: public OopClosure {
......@@ -152,9 +184,10 @@ class FilterIntoCSClosure: public OopClosure {
DirtyCardToOopClosure* _dcto_cl;
public:
FilterIntoCSClosure( DirtyCardToOopClosure* dcto_cl,
G1CollectedHeap* g1, OopClosure* oc) :
_dcto_cl(dcto_cl), _g1(g1), _oc(oc)
{}
G1CollectedHeap* g1,
OopClosure* oc) :
_dcto_cl(dcto_cl), _g1(g1), _oc(oc) { }
template <class T> void do_oop_nv(T* p);
virtual void do_oop(oop* p) { do_oop_nv(p); }
virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
......
......@@ -234,6 +234,7 @@ void G1RemSet::scanRS(OopsInHeapRegionClosure* oc, int worker_i) {
HeapRegion *startRegion = calculateStartRegion(worker_i);
ScanRSClosure scanRScl(oc, worker_i);
_g1->collection_set_iterate_from(startRegion, &scanRScl);
scanRScl.set_try_claimed();
_g1->collection_set_iterate_from(startRegion, &scanRScl);
......@@ -283,6 +284,7 @@ void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) {
double start = os::elapsedTime();
// Apply the given closure to all remaining log entries.
RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq);
_g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i);
// Now there should be no dirty cards.
......
......@@ -33,11 +33,11 @@
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
int HeapRegion::GrainBytes = 0;
int HeapRegion::GrainWords = 0;
int HeapRegion::CardsPerRegion = 0;
int HeapRegion::LogOfHRGrainBytes = 0;
int HeapRegion::LogOfHRGrainWords = 0;
size_t HeapRegion::GrainBytes = 0;
size_t HeapRegion::GrainWords = 0;
size_t HeapRegion::CardsPerRegion = 0;
HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
HeapRegion* hr, OopClosure* cl,
......@@ -45,7 +45,7 @@ HeapRegionDCTOC::HeapRegionDCTOC(G1CollectedHeap* g1,
FilterKind fk) :
ContiguousSpaceDCTOC(hr, cl, precision, NULL),
_hr(hr), _fk(fk), _g1(g1)
{}
{ }
FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
OopClosure* oc) :
......@@ -210,15 +210,17 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
HeapWord* top,
OopClosure* cl) {
G1CollectedHeap* g1h = _g1;
int oop_size;
OopClosure* cl2 = NULL;
OopClosure* cl2 = cl;
FilterIntoCSClosure intoCSFilt(this, g1h, cl);
FilterOutOfRegionClosure outOfRegionFilt(_hr, cl);
switch (_fk) {
case NoFilterKind: cl2 = cl; break;
case IntoCSFilterKind: cl2 = &intoCSFilt; break;
case OutOfRegionFilterKind: cl2 = &outOfRegionFilt; break;
default: ShouldNotReachHere();
}
// Start filtering what we add to the remembered set. If the object is
......@@ -239,16 +241,19 @@ void HeapRegionDCTOC::walk_mem_region_with_cl(MemRegion mr,
case NoFilterKind:
bottom = walk_mem_region_loop(cl, g1h, _hr, bottom, top);
break;
case IntoCSFilterKind: {
FilterIntoCSClosure filt(this, g1h, cl);
bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
break;
}
case OutOfRegionFilterKind: {
FilterOutOfRegionClosure filt(_hr, cl);
bottom = walk_mem_region_loop(&filt, g1h, _hr, bottom, top);
break;
}
default:
ShouldNotReachHere();
}
......@@ -317,11 +322,11 @@ void HeapRegion::setup_heap_region_size(uintx min_heap_size) {
guarantee(GrainBytes == 0, "we should only set it once");
// The cast to int is safe, given that we've bounded region_size by
// MIN_REGION_SIZE and MAX_REGION_SIZE.
GrainBytes = (int) region_size;
GrainBytes = (size_t)region_size;
guarantee(GrainWords == 0, "we should only set it once");
GrainWords = GrainBytes >> LogHeapWordSize;
guarantee(1 << LogOfHRGrainWords == GrainWords, "sanity");
guarantee((size_t)(1 << LogOfHRGrainWords) == GrainWords, "sanity");
guarantee(CardsPerRegion == 0, "we should only set it once");
CardsPerRegion = GrainBytes >> CardTableModRefBS::card_shift;
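  // For example, with a 1M region and 512-byte cards
  // (card_shift == 9), CardsPerRegion == 1M >> 9 == 2048.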
......@@ -374,8 +379,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space) {
void HeapRegion::par_clear() {
assert(used() == 0, "the region should have been already cleared");
assert(capacity() == (size_t) HeapRegion::GrainBytes,
"should be back to normal");
assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
HeapRegionRemSet* hrrs = rem_set();
hrrs->clear();
CardTableModRefBS* ct_bs =
......@@ -431,7 +435,7 @@ void HeapRegion::set_notHumongous() {
assert(end() == _orig_end, "sanity");
}
assert(capacity() == (size_t) HeapRegion::GrainBytes, "pre-condition");
assert(capacity() == HeapRegion::GrainBytes, "pre-condition");
_humongous_type = NotHumongous;
_humongous_start_region = NULL;
}
......@@ -483,12 +487,13 @@ HeapRegion::
HeapRegion(size_t hrs_index, G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed)
: G1OffsetTableContigSpace(sharedOffsetArray, mr, is_zeroed),
_next_fk(HeapRegionDCTOC::NoFilterKind), _hrs_index(hrs_index),
_hrs_index(hrs_index),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
_claimed(InitialClaimValue), _evacuation_failed(false),
_prev_marked_bytes(0), _next_marked_bytes(0), _sort_index(-1),
_gc_efficiency(0.0),
_young_type(NotYoung), _next_young_region(NULL),
_next_dirty_cards_region(NULL), _next(NULL), _pending_removal(false),
#ifdef ASSERT
......
......@@ -118,7 +118,6 @@ public:
FilterKind fk);
};
// The complicating factor is that BlockOffsetTable diverged
// significantly, and we need functionality that is only in the G1 version.
// So I copied that code, which led to an alternate G1 version of
......@@ -223,10 +222,6 @@ class HeapRegion: public G1OffsetTableContigSpace {
ContinuesHumongous
};
// The next filter kind that should be used for a "new_dcto_cl" call with
// the "traditional" signature.
HeapRegionDCTOC::FilterKind _next_fk;
// Requires that the region "mr" be dense with objects, and begin and end
// with an object.
void oops_in_mr_iterate(MemRegion mr, OopClosure* cl);
......@@ -351,16 +346,17 @@ class HeapRegion: public G1OffsetTableContigSpace {
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr, bool is_zeroed);
static int LogOfHRGrainBytes;
static int LogOfHRGrainWords;
// The normal type of these should be size_t. However, they used to
// be members of an enum before and they are assumed by the
// compilers to be ints. To avoid going and fixing all their uses,
// I'm declaring them as ints. I'm not anticipating heap region
// sizes to reach anywhere near 2g, so using an int here is safe.
static int GrainBytes;
static int GrainWords;
static int CardsPerRegion;
static int LogOfHRGrainBytes;
static int LogOfHRGrainWords;
static size_t GrainBytes;
static size_t GrainWords;
static size_t CardsPerRegion;
static size_t align_up_to_region_byte_size(size_t sz) {
return (sz + (size_t) GrainBytes - 1) &
~((1 << (size_t) LogOfHRGrainBytes) - 1);
}
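  // For example, with 1M regions (GrainBytes == 1M,
  // LogOfHRGrainBytes == 20), align_up_to_region_byte_size(1)
  // returns 1M and align_up_to_region_byte_size(2M) returns 2M
  // (already aligned).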
// It sets up the heap region size (GrainBytes / GrainWords), as
// well as other related fields that are based on the heap region
......@@ -573,40 +569,14 @@ class HeapRegion: public G1OffsetTableContigSpace {
// allocated in the current region before the last call to "save_mark".
void oop_before_save_marks_iterate(OopClosure* cl);
// This call determines the "filter kind" argument that will be used for
// the next call to "new_dcto_cl" on this region with the "traditional"
// signature (i.e., the call below.) The default, in the absence of a
// preceding call to this method, is "NoFilterKind", and a call to this
// method is necessary for each such call, or else it reverts to the
// default.
// (This is really ugly, but all other methods I could think of changed a
// lot of main-line code for G1.)
void set_next_filter_kind(HeapRegionDCTOC::FilterKind nfk) {
_next_fk = nfk;
}
DirtyCardToOopClosure*
new_dcto_closure(OopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapRegionDCTOC::FilterKind fk);
#if WHASSUP
DirtyCardToOopClosure*
new_dcto_closure(OopClosure* cl,
CardTableModRefBS::PrecisionStyle precision,
HeapWord* boundary) {
assert(boundary == NULL, "This arg doesn't make sense here.");
DirtyCardToOopClosure* res = new_dcto_closure(cl, precision, _next_fk);
_next_fk = HeapRegionDCTOC::NoFilterKind;
return res;
}
#endif
//
// Note the start or end of marking. This tells the heap region
// that the collector is about to start or has finished (concurrently)
// marking the heap.
//
// Note the start of a marking phase. Record the
// start of the unmarked area of the region here.
......
......@@ -148,7 +148,7 @@ protected:
CardIdx_t from_card = (CardIdx_t)
hw_offset >> (CardTableModRefBS::card_shift - LogHeapWordSize);
assert(0 <= from_card && from_card < HeapRegion::CardsPerRegion,
assert(0 <= from_card && (size_t)from_card < HeapRegion::CardsPerRegion,
"Must be in range.");
add_card_work(from_card, par);
}
......@@ -639,7 +639,7 @@ void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) {
uintptr_t(from_hr->bottom())
>> CardTableModRefBS::card_shift;
CardIdx_t card_index = from_card - from_hr_bot_card_index;
assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
if (G1HRRSUseSparseTable &&
_sparse_table.add_card(from_hrs_ind, card_index)) {
......@@ -1066,7 +1066,7 @@ bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const
uintptr_t(hr->bottom()) >> CardTableModRefBS::card_shift;
assert(from_card >= hr_bot_card_index, "Inv");
CardIdx_t card_index = from_card - hr_bot_card_index;
assert(0 <= card_index && card_index < HeapRegion::CardsPerRegion,
assert(0 <= card_index && (size_t)card_index < HeapRegion::CardsPerRegion,
"Must be in range.");
return _sparse_table.contains_card(hr_ind, card_index);
}
......@@ -1191,7 +1191,7 @@ void HeapRegionRemSetIterator::initialize(const HeapRegionRemSet* hrrs) {
_is = Sparse;
// Set these values so that we increment to the first region.
_coarse_cur_region_index = -1;
_coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);;
_coarse_cur_region_cur_card = (HeapRegion::CardsPerRegion-1);
_cur_region_cur_card = 0;
......@@ -1270,7 +1270,7 @@ bool HeapRegionRemSetIterator::fine_has_next(size_t& card_index) {
bool HeapRegionRemSetIterator::fine_has_next() {
return
_fine_cur_prt != NULL &&
_cur_region_cur_card < (size_t) HeapRegion::CardsPerRegion;
_cur_region_cur_card < HeapRegion::CardsPerRegion;
}
bool HeapRegionRemSetIterator::has_next(size_t& card_index) {
......
......@@ -395,8 +395,8 @@ class HeapRegionRemSetIterator : public CHeapObj {
// Coarse table iteration fields:
// Current region index;
int _coarse_cur_region_index;
int _coarse_cur_region_cur_card;
size_t _coarse_cur_region_cur_card;
bool coarse_has_next(size_t& card_index);
......
......@@ -56,6 +56,7 @@ class FreeRegionList;
// and maintain that: _length <= _allocated_length <= _max_length
class HeapRegionSeq: public CHeapObj {
friend class VMStructs;
// The array that holds the HeapRegions.
HeapRegion** _regions;
......
......@@ -29,6 +29,7 @@
#include "memory/sharedHeap.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
// This method removes entries from an SATB buffer that will not be
// useful to the concurrent marking threads. An entry is removed if it
......@@ -252,9 +253,18 @@ void SATBMarkQueueSet::par_iterate_closure_all_threads(int worker) {
t->satb_mark_queue().apply_closure(_par_closures[worker]);
}
}
// We'll have worker 0 do this one.
if (worker == 0) {
shared_satb_queue()->apply_closure(_par_closures[0]);
// We also need to claim the VMThread so that its parity is updated;
// otherwise the next call to Thread::possibly_parallel_oops_do inside
// a StrongRootsScope might skip the VMThread because it has a stale
// parity that matches the parity set by the StrongRootsScope.
//
// Whichever worker succeeds in claiming the VMThread gets to do
// the shared queue.
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(true, parity)) {
shared_satb_queue()->apply_closure(_par_closures[worker]);
}
}
......
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
\
static_field(HeapRegion, GrainBytes, size_t) \
\
nonstatic_field(HeapRegionSeq, _regions, HeapRegion**) \
nonstatic_field(HeapRegionSeq, _length, size_t) \
\
nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \
nonstatic_field(G1CollectedHeap, _g1_committed, MemRegion) \
nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \
nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \
\
nonstatic_field(G1MonitoringSupport, _eden_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _eden_used, size_t) \
nonstatic_field(G1MonitoringSupport, _survivor_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _survivor_used, size_t) \
nonstatic_field(G1MonitoringSupport, _old_committed, size_t) \
nonstatic_field(G1MonitoringSupport, _old_used, size_t)                      \

#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
\
declare_type(G1CollectedHeap, SharedHeap) \
\
declare_type(HeapRegion, ContiguousSpace) \
declare_toplevel_type(HeapRegionSeq) \
declare_toplevel_type(G1MonitoringSupport) \
\
declare_toplevel_type(G1CollectedHeap*) \
declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*)                                  \

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
......@@ -198,10 +198,9 @@ void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
allocate_stacks();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor()->setup_policy(clear_all_softrefs);
mark_sweep_phase1(clear_all_softrefs);
......
......@@ -2069,10 +2069,9 @@ void PSParallelCompact::invoke_no_policy(bool maximum_heap_compaction) {
CodeCache::gc_prologue();
Threads::gc_prologue();
NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
COMPILER2_PRESENT(DerivedPointerTable::clear());
ref_processor()->enable_discovery();
ref_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
ref_processor()->setup_policy(maximum_heap_compaction);
bool marked_for_unloading = false;
......
......@@ -102,17 +102,15 @@ void PSPromotionLAB::flush() {
_state = flushed;
}
bool PSPromotionLAB::unallocate_object(oop obj) {
bool PSPromotionLAB::unallocate_object(HeapWord* obj, size_t obj_size) {
assert(Universe::heap()->is_in(obj), "Object outside heap");
if (contains(obj)) {
HeapWord* object_end = (HeapWord*)obj + obj->size();
assert(object_end <= top(), "Object crosses promotion LAB boundary");
HeapWord* object_end = obj + obj_size;
assert(object_end == top(), "Not matching last allocation");
if (object_end == top()) {
set_top((HeapWord*)obj);
return true;
}
set_top(obj);
return true;
}
return false;
......
......@@ -73,7 +73,7 @@ class PSPromotionLAB : public CHeapObj {
bool is_flushed() { return _state == flushed; }
bool unallocate_object(oop obj);
bool unallocate_object(HeapWord* obj, size_t obj_size);
// Returns a subregion containing all objects in this space.
MemRegion used_region() { return MemRegion(bottom(), top()); }
......
......@@ -380,10 +380,10 @@ oop PSPromotionManager::copy_to_survivor_space(oop o) {
// deallocate it, so we have to test. If the deallocation fails,
// overwrite with a filler object.
if (new_obj_is_tenured) {
if (!_old_lab.unallocate_object(new_obj)) {
if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
}
} else if (!_young_lab.unallocate_object(new_obj)) {
} else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
}
......
......@@ -350,10 +350,9 @@ bool PSScavenge::invoke_no_policy() {
}
save_to_space_top_before_gc();
NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
COMPILER2_PRESENT(DerivedPointerTable::clear());
reference_processor()->enable_discovery();
reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
reference_processor()->setup_policy(false);
// We track how much was promoted to the next generation for
......
......@@ -26,14 +26,10 @@
#include "gc_implementation/shared/generationCounters.hpp"
#include "memory/resourceArea.hpp"
GenerationCounters::GenerationCounters(const char* name,
int ordinal, int spaces,
VirtualSpace* v):
_virtual_space(v) {
void GenerationCounters::initialize(const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
size_t curr_capacity) {
if (UsePerfData) {
EXCEPTION_MARK;
ResourceMark rm;
......@@ -51,18 +47,37 @@ GenerationCounters::GenerationCounters(const char* name,
cname = PerfDataManager::counter_name(_name_space, "minCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
_virtual_space == NULL ? 0 :
_virtual_space->committed_size(), CHECK);
min_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "maxCapacity");
PerfDataManager::create_constant(SUN_GC, cname, PerfData::U_Bytes,
_virtual_space == NULL ? 0 :
_virtual_space->reserved_size(), CHECK);
max_capacity, CHECK);
cname = PerfDataManager::counter_name(_name_space, "capacity");
_current_size = PerfDataManager::create_variable(SUN_GC, cname,
PerfData::U_Bytes,
_virtual_space == NULL ? 0 :
_virtual_space->committed_size(), CHECK);
_current_size =
PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
curr_capacity, CHECK);
}
}
GenerationCounters::GenerationCounters(const char* name,
int ordinal, int spaces,
VirtualSpace* v)
: _virtual_space(v) {
assert(v != NULL, "don't call this constructor if v == NULL");
initialize(name, ordinal, spaces,
v->committed_size(), v->reserved_size(), v->committed_size());
}
GenerationCounters::GenerationCounters(const char* name,
int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
size_t curr_capacity)
: _virtual_space(NULL) {
initialize(name, ordinal, spaces, min_capacity, max_capacity, curr_capacity);
}
void GenerationCounters::update_all() {
assert(_virtual_space != NULL, "otherwise, override this method");
_current_size->set_value(_virtual_space->committed_size());
}
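// A subclass constructed without a VirtualSpace is expected to
// override update_all(); e.g., a sketch along the lines of the G1
// young gen counters (mult of 3 = one eden plus two survivor spaces):
//
//   void G1YoungGenerationCounters::update_all() {
//     size_t committed =
//       G1MonitoringSupport::pad_capacity(_g1mm->young_gen_committed(), 3);
//     _current_size->set_value(committed);
//   }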
......@@ -34,6 +34,11 @@
class GenerationCounters: public CHeapObj {
friend class VMStructs;
private:
void initialize(const char* name, int ordinal, int spaces,
size_t min_capacity, size_t max_capacity,
size_t curr_capacity);
protected:
PerfVariable* _current_size;
VirtualSpace* _virtual_space;
......@@ -48,11 +53,18 @@ class GenerationCounters: public CHeapObj {
char* _name_space;
// This constructor is only meant for use with the PSGenerationCounters
// constructor. The need for such a constructor should be eliminated
// when VirtualSpace and PSVirtualSpace are unified.
GenerationCounters() : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}
GenerationCounters()
  : _name_space(NULL), _current_size(NULL), _virtual_space(NULL) {}

// This constructor is used for subclasses that do not have a space
// associated with them (e.g., in G1).
GenerationCounters(const char* name, int ordinal, int spaces,
                   size_t min_capacity, size_t max_capacity,
                   size_t curr_capacity);

public:
GenerationCounters(const char* name, int ordinal, int spaces,
VirtualSpace* v);
......@@ -60,10 +72,7 @@ class GenerationCounters: public CHeapObj {
if (_name_space != NULL) FREE_C_HEAP_ARRAY(char, _name_space);
}
virtual void update_all() {
_current_size->set_value(_virtual_space == NULL ? 0 :
_virtual_space->committed_size());
}
virtual void update_all();
const char* name_space() const { return _name_space; }
......
......@@ -599,8 +599,7 @@ void GenCollectedHeap::do_collection(bool full,
// atomic wrt other collectors in this configuration, we
// are guaranteed to have empty discovered ref lists.
if (rp->discovery_is_atomic()) {
rp->verify_no_references_recorded();
rp->enable_discovery();
rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
rp->setup_policy(do_clear_all_soft_refs);
} else {
// collect() below will enable discovery as appropriate
......
......@@ -35,42 +35,8 @@
ReferencePolicy* ReferenceProcessor::_always_clear_soft_ref_policy = NULL;
ReferencePolicy* ReferenceProcessor::_default_soft_ref_policy = NULL;
const int subclasses_of_ref = REF_PHANTOM - REF_OTHER;
bool ReferenceProcessor::_pending_list_uses_discovered_field = false;
// List of discovered references.
class DiscoveredList {
public:
DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
oop head() const {
return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
_oop_head;
}
HeapWord* adr_head() {
return UseCompressedOops ? (HeapWord*)&_compressed_head :
(HeapWord*)&_oop_head;
}
void set_head(oop o) {
if (UseCompressedOops) {
// Must compress the head ptr.
_compressed_head = oopDesc::encode_heap_oop(o);
} else {
_oop_head = o;
}
}
bool empty() const { return head() == NULL; }
size_t length() { return _len; }
void set_length(size_t len) { _len = len; }
void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
void dec_length(size_t dec) { _len -= dec; }
private:
// Set value depending on UseCompressedOops. This could be a template class
// but then we have to fix all the instantiations and declarations that use this class.
oop _oop_head;
narrowOop _compressed_head;
size_t _len;
};
void referenceProcessor_init() {
ReferenceProcessor::init_statics();
}
......@@ -112,7 +78,8 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_discovery_is_mt = mt_discovery;
_num_q = MAX2(1, mt_processing_degree);
_max_num_q = MAX2(_num_q, mt_discovery_degree);
_discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList, _max_num_q * subclasses_of_ref);
_discoveredSoftRefs = NEW_C_HEAP_ARRAY(DiscoveredList,
_max_num_q * number_of_subclasses_of_ref());
if (_discoveredSoftRefs == NULL) {
vm_exit_during_initialization("Could not allocated RefProc Array");
}
......@@ -120,7 +87,7 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
_discoveredFinalRefs = &_discoveredWeakRefs[_max_num_q];
_discoveredPhantomRefs = &_discoveredFinalRefs[_max_num_q];
// Initialize all entries to NULL
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
_discoveredSoftRefs[i].set_head(NULL);
_discoveredSoftRefs[i].set_length(0);
}
......@@ -134,19 +101,15 @@ ReferenceProcessor::ReferenceProcessor(MemRegion span,
#ifndef PRODUCT
void ReferenceProcessor::verify_no_references_recorded() {
guarantee(!_discovering_refs, "Discovering refs?");
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
guarantee(_discoveredSoftRefs[i].empty(),
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
guarantee(_discoveredSoftRefs[i].is_empty(),
"Found non-empty discovered list");
}
}
#endif
void ReferenceProcessor::weak_oops_do(OopClosure* f) {
// Should this instead be
// for (int i = 0; i < subclasses_of_ref; i++_ {
// for (int j = 0; j < _num_q; j++) {
// int index = i * _max_num_q + j;
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
if (UseCompressedOops) {
f->do_oop((narrowOop*)_discoveredSoftRefs[i].adr_head());
} else {
......@@ -404,7 +367,7 @@ public:
// allocated and are indexed into.
assert(_n_queues == (int) _ref_processor.max_num_q(), "Different number not expected");
for (int j = 0;
j < subclasses_of_ref;
j < ReferenceProcessor::number_of_subclasses_of_ref();
j++, index += _n_queues) {
_ref_processor.enqueue_discovered_reflist(
_refs_lists[index], _pending_list_addr);
......@@ -424,7 +387,7 @@ void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr
task_executor->execute(tsk);
} else {
// Serial code: call the parent class's implementation
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
enqueue_discovered_reflist(_discoveredSoftRefs[i], pending_list_addr);
_discoveredSoftRefs[i].set_head(NULL);
_discoveredSoftRefs[i].set_length(0);
......@@ -432,119 +395,7 @@ void ReferenceProcessor::enqueue_discovered_reflists(HeapWord* pending_list_addr
}
}
// Iterator for the list of discovered references.
class DiscoveredListIterator {
public:
inline DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive);
// End Of List.
inline bool has_next() const { return _ref != NULL; }
// Get oop to the Reference object.
inline oop obj() const { return _ref; }
// Get oop to the referent object.
inline oop referent() const { return _referent; }
// Returns true if referent is alive.
inline bool is_referent_alive() const;
// Loads data for the current reference.
// The "allow_null_referent" argument tells us to allow for the possibility
// of a NULL referent in the discovered Reference object. This typically
// happens in the case of concurrent collectors that may have done the
// discovery concurrently, or interleaved, with mutator execution.
inline void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
// Move to the next discovered reference.
inline void next();
// Remove the current reference from the list
inline void remove();
// Make the Reference object active again.
inline void make_active() { java_lang_ref_Reference::set_next(_ref, NULL); }
// Make the referent alive.
inline void make_referent_alive() {
if (UseCompressedOops) {
_keep_alive->do_oop((narrowOop*)_referent_addr);
} else {
_keep_alive->do_oop((oop*)_referent_addr);
}
}
// Update the discovered field.
inline void update_discovered() {
// First _prev_next ref actually points into DiscoveredList (gross).
if (UseCompressedOops) {
if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
_keep_alive->do_oop((narrowOop*)_prev_next);
}
} else {
if (!oopDesc::is_null(*(oop*)_prev_next)) {
_keep_alive->do_oop((oop*)_prev_next);
}
}
}
// NULL out referent pointer.
inline void clear_referent() { oop_store_raw(_referent_addr, NULL); }
// Statistics
NOT_PRODUCT(
inline size_t processed() const { return _processed; }
inline size_t removed() const { return _removed; }
)
inline void move_to_next();
private:
DiscoveredList& _refs_list;
HeapWord* _prev_next;
oop _prev;
oop _ref;
HeapWord* _discovered_addr;
oop _next;
HeapWord* _referent_addr;
oop _referent;
OopClosure* _keep_alive;
BoolObjectClosure* _is_alive;
DEBUG_ONLY(
oop _first_seen; // cyclic linked list check
)
NOT_PRODUCT(
size_t _processed;
size_t _removed;
)
};
inline DiscoveredListIterator::DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive)
: _refs_list(refs_list),
_prev_next(refs_list.adr_head()),
_prev(NULL),
_ref(refs_list.head()),
#ifdef ASSERT
_first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
_processed(0),
_removed(0),
#endif
_next(NULL),
_keep_alive(keep_alive),
_is_alive(is_alive)
{ }
inline bool DiscoveredListIterator::is_referent_alive() const {
return _is_alive->do_object_b(_referent);
}
inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referent)) {
_discovered_addr = java_lang_ref_Reference::discovered_addr(_ref);
oop discovered = java_lang_ref_Reference::discovered(_ref);
assert(_discovered_addr && discovered->is_oop_or_null(),
......@@ -560,13 +411,7 @@ inline void DiscoveredListIterator::load_ptrs(DEBUG_ONLY(bool allow_null_referen
"bad referent");
}
inline void DiscoveredListIterator::next() {
_prev_next = _discovered_addr;
_prev = _ref;
move_to_next();
}
inline void DiscoveredListIterator::remove() {
void DiscoveredListIterator::remove() {
assert(_ref->is_oop(), "Dropping a bad reference");
oop_store_raw(_discovered_addr, NULL);
......@@ -592,15 +437,29 @@ inline void DiscoveredListIterator::remove() {
_refs_list.dec_length(1);
}
inline void DiscoveredListIterator::move_to_next() {
  if (_ref == _next) {
    // End of the list.
    _ref = NULL;
  } else {
    _ref = _next;
  }
  assert(_ref != _first_seen, "cyclic ref_list found");
  NOT_PRODUCT(_processed++);
}

// Make the Reference object active again.
void DiscoveredListIterator::make_active() {
  // For G1 we don't want to use set_next - it
  // will dirty the card for the next field of
  // the reference object and will fail
  // CT verification.
  if (UseG1GC) {
    BarrierSet* bs = oopDesc::bs();
    HeapWord* next_addr = java_lang_ref_Reference::next_addr(_ref);
    if (UseCompressedOops) {
      bs->write_ref_field_pre((narrowOop*)next_addr, NULL);
    } else {
      bs->write_ref_field_pre((oop*)next_addr, NULL);
    }
    java_lang_ref_Reference::set_next_raw(_ref, NULL);
  } else {
    java_lang_ref_Reference::set_next(_ref, NULL);
  }
}
void DiscoveredListIterator::clear_referent() {
oop_store_raw(_referent_addr, NULL);
}
// NOTE: process_phase*() are largely similar, and at a high level
......@@ -786,10 +645,9 @@ ReferenceProcessor::abandon_partial_discovered_list(DiscoveredList& refs_list) {
void ReferenceProcessor::abandon_partial_discovery() {
// loop over the lists
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr("\nAbandoning %s discovered list",
list_name(i));
gclog_or_tty->print_cr("\nAbandoning %s discovered list", list_name(i));
}
abandon_partial_discovered_list(_discoveredSoftRefs[i]);
}
......@@ -858,6 +716,14 @@ private:
bool _clear_referent;
};
void ReferenceProcessor::set_discovered(oop ref, oop value) {
if (_discovered_list_needs_barrier) {
java_lang_ref_Reference::set_discovered(ref, value);
} else {
java_lang_ref_Reference::set_discovered_raw(ref, value);
}
}
// Balances reference queues.
// Move entries from all queues[0, 1, ..., _max_num_q-1] to
// queues[0, 1, ..., _num_q-1] because only the first _num_q
......@@ -915,9 +781,9 @@ void ReferenceProcessor::balance_queues(DiscoveredList ref_lists[])
// Add the chain to the to list.
if (ref_lists[to_idx].head() == NULL) {
// to list is empty. Make a loop at the end.
java_lang_ref_Reference::set_discovered(move_tail, move_tail);
set_discovered(move_tail, move_tail);
} else {
java_lang_ref_Reference::set_discovered(move_tail, ref_lists[to_idx].head());
set_discovered(move_tail, ref_lists[to_idx].head());
}
ref_lists[to_idx].set_head(move_head);
ref_lists[to_idx].inc_length(refs_to_move);
......@@ -1038,11 +904,7 @@ ReferenceProcessor::process_discovered_reflist(
void ReferenceProcessor::clean_up_discovered_references() {
// loop over the lists
// Should this instead be
// for (int i = 0; i < subclasses_of_ref; i++_ {
// for (int j = 0; j < _num_q; j++) {
// int index = i * _max_num_q + j;
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
if (TraceReferenceGC && PrintGCDetails && ((i % _max_num_q) == 0)) {
gclog_or_tty->print_cr(
"\nScrubbing %s discovered list of Null referents",
......@@ -1260,6 +1122,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
}
}
ResourceMark rm; // Needed for tracing.
HeapWord* const discovered_addr = java_lang_ref_Reference::discovered_addr(obj);
const oop discovered = java_lang_ref_Reference::discovered(obj);
assert(discovered->is_oop_or_null(), "bad discovered field");
......@@ -1472,7 +1336,9 @@ ReferenceProcessor::preclean_discovered_reflist(DiscoveredList& refs_list,
}
const char* ReferenceProcessor::list_name(int i) {
assert(i >= 0 && i <= _max_num_q * subclasses_of_ref, "Out of bounds index");
assert(i >= 0 && i <= _max_num_q * number_of_subclasses_of_ref(),
"Out of bounds index");
int j = i / _max_num_q;
switch (j) {
case 0: return "SoftRef";
......@@ -1493,7 +1359,7 @@ void ReferenceProcessor::verify_ok_to_handle_reflists() {
#ifndef PRODUCT
void ReferenceProcessor::clear_discovered_references() {
guarantee(!_discovering_refs, "Discovering refs?");
for (int i = 0; i < _max_num_q * subclasses_of_ref; i++) {
for (int i = 0; i < _max_num_q * number_of_subclasses_of_ref(); i++) {
clear_discovered_references(_discoveredSoftRefs[i]);
}
}
......
......@@ -48,18 +48,175 @@
// forward references
class ReferencePolicy;
class AbstractRefProcTaskExecutor;
class DiscoveredList;
// List of discovered references.
class DiscoveredList {
public:
DiscoveredList() : _len(0), _compressed_head(0), _oop_head(NULL) { }
oop head() const {
return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
_oop_head;
}
HeapWord* adr_head() {
return UseCompressedOops ? (HeapWord*)&_compressed_head :
(HeapWord*)&_oop_head;
}
void set_head(oop o) {
if (UseCompressedOops) {
// Must compress the head ptr.
_compressed_head = oopDesc::encode_heap_oop(o);
} else {
_oop_head = o;
}
}
bool is_empty() const { return head() == NULL; }
size_t length() { return _len; }
void set_length(size_t len) { _len = len; }
void inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
void dec_length(size_t dec) { _len -= dec; }
private:
// Set value depending on UseCompressedOops. This could be a template class
// but then we have to fix all the instantiations and declarations that use this class.
oop _oop_head;
narrowOop _compressed_head;
size_t _len;
};
// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
DiscoveredList& _refs_list;
HeapWord* _prev_next;
oop _prev;
oop _ref;
HeapWord* _discovered_addr;
oop _next;
HeapWord* _referent_addr;
oop _referent;
OopClosure* _keep_alive;
BoolObjectClosure* _is_alive;
DEBUG_ONLY(
oop _first_seen; // cyclic linked list check
)
NOT_PRODUCT(
size_t _processed;
size_t _removed;
)
public:
inline DiscoveredListIterator(DiscoveredList& refs_list,
OopClosure* keep_alive,
BoolObjectClosure* is_alive):
_refs_list(refs_list),
_prev_next(refs_list.adr_head()),
_prev(NULL),
_ref(refs_list.head()),
#ifdef ASSERT
_first_seen(refs_list.head()),
#endif
#ifndef PRODUCT
_processed(0),
_removed(0),
#endif
_next(NULL),
_keep_alive(keep_alive),
_is_alive(is_alive)
{ }
// End Of List.
inline bool has_next() const { return _ref != NULL; }
// Get oop to the Reference object.
inline oop obj() const { return _ref; }
// Get oop to the referent object.
inline oop referent() const { return _referent; }
// Returns true if referent is alive.
inline bool is_referent_alive() const {
return _is_alive->do_object_b(_referent);
}
// Loads data for the current reference.
// The "allow_null_referent" argument tells us to allow for the possibility
// of a NULL referent in the discovered Reference object. This typically
// happens in the case of concurrent collectors that may have done the
// discovery concurrently, or interleaved, with mutator execution.
void load_ptrs(DEBUG_ONLY(bool allow_null_referent));
// Move to the next discovered reference.
inline void next() {
_prev_next = _discovered_addr;
_prev = _ref;
move_to_next();
}
// Remove the current reference from the list
void remove();
// Make the Reference object active again.
void make_active();
// Make the referent alive.
inline void make_referent_alive() {
if (UseCompressedOops) {
_keep_alive->do_oop((narrowOop*)_referent_addr);
} else {
_keep_alive->do_oop((oop*)_referent_addr);
}
}
// Update the discovered field.
inline void update_discovered() {
// First _prev_next ref actually points into DiscoveredList (gross).
if (UseCompressedOops) {
if (!oopDesc::is_null(*(narrowOop*)_prev_next)) {
_keep_alive->do_oop((narrowOop*)_prev_next);
}
} else {
if (!oopDesc::is_null(*(oop*)_prev_next)) {
_keep_alive->do_oop((oop*)_prev_next);
}
}
}
// NULL out referent pointer.
void clear_referent();
// Statistics
NOT_PRODUCT(
inline size_t processed() const { return _processed; }
inline size_t removed() const { return _removed; }
)
inline void move_to_next() {
if (_ref == _next) {
// End of the list.
_ref = NULL;
} else {
_ref = _next;
}
assert(_ref != _first_seen, "cyclic ref_list found");
NOT_PRODUCT(_processed++);
}
};
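A sketch of the traversal pattern the processing phases use with this iterator, modeled on the "referent turned out to be alive" case (assumed shape, not the commit's exact loop):

  DiscoveredListIterator iter(refs_list, keep_alive, is_alive);
  while (iter.has_next()) {
    iter.load_ptrs(DEBUG_ONLY(false /* allow_null_referent */));
    if (iter.is_referent_alive()) {
      // Referent is strongly reachable after all: unlink this entry,
      // make the Reference active again, and keep the referent alive.
      iter.remove();
      iter.make_active();
      iter.make_referent_alive();
      iter.move_to_next();
    } else {
      iter.next();
    }
  }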
class ReferenceProcessor : public CHeapObj {
protected:
// Compatibility with pre-4965777 JDK's
static bool _pending_list_uses_discovered_field;
MemRegion _span; // (right-open) interval of heap
// subject to wkref discovery
bool _discovering_refs; // true when discovery enabled
bool _discovery_is_atomic; // if discovery is atomic wrt
// other collectors in configuration
bool _discovery_is_mt; // true if reference discovery is MT.
// If true, setting "next" field of a discovered refs list requires
// write barrier(s). (Must be true if used in a collector in which
// elements of a discovered list may be moved during discovery: for
......@@ -67,18 +224,19 @@ class ReferenceProcessor : public CHeapObj {
// long-term concurrent marking phase that does weak reference
// discovery.)
bool _discovered_list_needs_barrier;
BarrierSet* _bs; // Cached copy of BarrierSet.
bool _enqueuing_is_done; // true if all weak references enqueued
bool _processing_is_mt; // true during phases when
// reference processing is MT.
int _next_id; // round-robin mod _num_q counter in
// support of work distribution
// For collectors that do not keep GC marking information
// For collectors that do not keep GC liveness information
// in the object header, this field holds a closure that
// helps the reference processor determine the reachability
// of an oop (the field is currently initialized to NULL for
// all collectors but the CMS collector).
// of an oop. It is currently initialized to NULL for all
// collectors except for CMS and G1.
BoolObjectClosure* _is_alive_non_header;
// Soft ref clearing policies
......@@ -102,10 +260,13 @@ class ReferenceProcessor : public CHeapObj {
DiscoveredList* _discoveredPhantomRefs;
public:
static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }
int num_q() { return _num_q; }
int max_num_q() { return _max_num_q; }
void set_active_mt_degree(int v) { _num_q = v; }
DiscoveredList* discovered_soft_refs() { return _discoveredSoftRefs; }
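number_of_subclasses_of_ref() relies on the ordering of HotSpot's ReferenceType enum (referenceType.hpp), where the four concrete subclasses follow REF_OTHER contiguously:

  enum ReferenceType {
    REF_NONE,     // regular class
    REF_OTHER,    // subclass of java/lang/ref/Reference, none of the below
    REF_SOFT,     // subclass of java/lang/ref/SoftReference
    REF_WEAK,     // subclass of java/lang/ref/WeakReference
    REF_FINAL,    // subclass of java/lang/ref/FinalReference
    REF_PHANTOM   // subclass of java/lang/ref/PhantomReference
  };
  // So REF_PHANTOM - REF_OTHER == 4: Soft, Weak, Final, Phantom.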
ReferencePolicy* setup_policy(bool always_clear) {
_current_soft_ref_policy = always_clear ?
_always_clear_soft_ref_policy : _default_soft_ref_policy;
......@@ -205,6 +366,11 @@ class ReferenceProcessor : public CHeapObj {
void enqueue_discovered_reflists(HeapWord* pending_list_addr, AbstractRefProcTaskExecutor* task_executor);
protected:
// Set the 'discovered' field of the given reference to
// the given value - emitting barriers depending upon
// the value of _discovered_list_needs_barrier.
void set_discovered(oop ref, oop value);
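A plausible shape for set_discovered() given the comment above (a sketch only; the exact body, and whether it uses these particular helpers, is not shown in this hunk):

  void ReferenceProcessor::set_discovered(oop ref, oop value) {
    HeapWord* discovered_addr = java_lang_ref_Reference::discovered_addr(ref);
    oop_store_raw(discovered_addr, value);
    if (_discovered_list_needs_barrier) {
      // e.g. G1 must tell concurrent marking about the new edge
      _bs->write_ref_field(discovered_addr, value);
    }
  }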
// "Preclean" the given discovered reference list
// by removing references with strongly reachable referents.
// Currently used in support of CMS only.
......@@ -290,7 +456,19 @@ class ReferenceProcessor : public CHeapObj {
void set_span(MemRegion span) { _span = span; }
// start and stop weak ref discovery
void enable_discovery() { _discovering_refs = true; }
void enable_discovery(bool verify_disabled, bool check_no_refs) {
#ifdef ASSERT
// Verify that we're not currently discovering refs
assert(!verify_disabled || !_discovering_refs, "nested call?");
if (check_no_refs) {
// Verify that the discovered lists are empty
verify_no_references_recorded();
}
#endif // ASSERT
_discovering_refs = true;
}
void disable_discovery() { _discovering_refs = false; }
bool discovery_enabled() { return _discovering_refs; }
......@@ -365,7 +543,7 @@ class NoRefDiscovery: StackObj {
~NoRefDiscovery() {
if (_was_discovering_refs) {
_rp->enable_discovery();
_rp->enable_discovery(true /*verify_disabled*/, false /*check_no_refs*/);
}
}
};
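NoRefDiscovery is the usual RAII guard: discovery is switched off for a scope and restored on exit. A minimal usage sketch (rp assumed to be the active ReferenceProcessor):

  {
    NoRefDiscovery no_discovery(rp);
    // ... work during which no new references may be discovered ...
  } // destructor re-enables discovery only if it was enabled on entry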
......
......@@ -753,8 +753,9 @@ bool Thread::claim_oops_do_par_case(int strong_roots_parity) {
jint thread_parity = _oops_do_parity;
if (thread_parity != strong_roots_parity) {
jint res = Atomic::cmpxchg(strong_roots_parity, &_oops_do_parity, thread_parity);
if (res == thread_parity) return true;
else {
if (res == thread_parity) {
return true;
} else {
guarantee(res == strong_roots_parity, "Or else what?");
assert(SharedHeap::heap()->n_par_threads() > 0,
"Should only fail when parallel.");
......@@ -3909,8 +3910,9 @@ void Threads::possibly_parallel_oops_do(OopClosure* f, CodeBlobClosure* cf) {
}
}
VMThread* vmt = VMThread::vm_thread();
if (vmt->claim_oops_do(is_par, cp))
if (vmt->claim_oops_do(is_par, cp)) {
vmt->oops_do(f, cf);
}
}
#ifndef SERIALGC
......
......@@ -182,6 +182,7 @@
#include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
#include "gc_implementation/parallelScavenge/psYoungGen.hpp"
#include "gc_implementation/parallelScavenge/vmStructs_parallelgc.hpp"
#include "gc_implementation/g1/vmStructs_g1.hpp"
#endif
#ifdef COMPILER2
#include "opto/addnode.hpp"
......@@ -2878,6 +2879,9 @@ VMStructEntry VMStructs::localHotSpotVMStructs[] = {
VM_STRUCTS_CMS(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
GENERATE_STATIC_VM_STRUCT_ENTRY)
VM_STRUCTS_G1(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
GENERATE_STATIC_VM_STRUCT_ENTRY)
#endif // SERIALGC
VM_STRUCTS_CPU(GENERATE_NONSTATIC_VM_STRUCT_ENTRY, \
......@@ -2921,6 +2925,9 @@ VMTypeEntry VMStructs::localHotSpotVMTypes[] = {
GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
VM_TYPES_PARNEW(GENERATE_VM_TYPE_ENTRY)
VM_TYPES_G1(GENERATE_VM_TYPE_ENTRY,
GENERATE_TOPLEVEL_VM_TYPE_ENTRY)
#endif // SERIALGC
VM_TYPES_CPU(GENERATE_VM_TYPE_ENTRY,
......@@ -3020,6 +3027,9 @@ VMStructs::init() {
VM_STRUCTS_CMS(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_VOLATILE_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_VM_STRUCT_ENTRY);
VM_STRUCTS_G1(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
CHECK_STATIC_VM_STRUCT_ENTRY);
#endif // SERIALGC
VM_STRUCTS_CPU(CHECK_NONSTATIC_VM_STRUCT_ENTRY,
......@@ -3060,6 +3070,9 @@ VMStructs::init() {
CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
VM_TYPES_PARNEW(CHECK_VM_TYPE_ENTRY)
VM_TYPES_G1(CHECK_VM_TYPE_ENTRY,
CHECK_SINGLE_ARG_VM_TYPE_NO_OP);
#endif // SERIALGC
VM_TYPES_CPU(CHECK_VM_TYPE_ENTRY,
......@@ -3125,6 +3138,8 @@ VMStructs::init() {
debug_only(VM_STRUCTS_CMS(ENSURE_FIELD_TYPE_PRESENT, \
ENSURE_FIELD_TYPE_PRESENT, \
ENSURE_FIELD_TYPE_PRESENT));
debug_only(VM_STRUCTS_G1(ENSURE_FIELD_TYPE_PRESENT, \
ENSURE_FIELD_TYPE_PRESENT));
#endif // SERIALGC
debug_only(VM_STRUCTS_CPU(ENSURE_FIELD_TYPE_PRESENT, \
ENSURE_FIELD_TYPE_PRESENT, \
......
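The VM_STRUCTS_G1/VM_TYPES_G1 macros wired in above follow the standard vmStructs pattern: each expands a fixed field/type list against whichever entry-generator macro the caller supplies. A plausible shape for VM_STRUCTS_G1 (sketch; the authoritative list lives in gc_implementation/g1/vmStructs_g1.hpp):

  #define VM_STRUCTS_G1(nonstatic_field, static_field)                      \
    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)    \
    nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)        \
    nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t)           \
    nonstatic_field(G1CollectedHeap, _g1mm,               G1MonitoringSupport*)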
......@@ -32,71 +32,44 @@
G1MemoryPoolSuper::G1MemoryPoolSuper(G1CollectedHeap* g1h,
const char* name,
size_t init_size,
size_t max_size,
bool support_usage_threshold) :
_g1h(g1h), CollectedMemoryPool(name,
MemoryPool::Heap,
init_size,
undefined_max(),
support_usage_threshold) {
_g1mm(g1h->g1mm()), CollectedMemoryPool(name,
MemoryPool::Heap,
init_size,
max_size,
support_usage_threshold) {
assert(UseG1GC, "sanity");
}
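One general C++ point about both versions of the initializer list above: initialization order is fixed by declaration order, base class first, so CollectedMemoryPool is constructed before _g1h or _g1mm no matter where it is written in the list. For example:

  struct Base { Base(int) { } };
  struct Derived : Base {
    int _m;
    // Base still runs first; compilers typically warn (-Wreorder) when
    // the written order differs from the actual order.
    Derived() : _m(1), Base(0) { }
  };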
// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::eden_space_committed(G1CollectedHeap* g1h) {
return MAX2(eden_space_used(g1h), (size_t) HeapRegion::GrainBytes);
}
// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::eden_space_used(G1CollectedHeap* g1h) {
return g1h->g1mm()->eden_space_used();
}
// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::survivor_space_committed(G1CollectedHeap* g1h) {
return g1h->g1mm()->survivor_space_committed();
}
// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::survivor_space_used(G1CollectedHeap* g1h) {
return g1h->g1mm()->survivor_space_used();
}
// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::old_space_committed(G1CollectedHeap* g1h) {
return g1h->g1mm()->old_space_committed();
}
// See the comment at the top of g1MemoryPool.hpp
size_t G1MemoryPoolSuper::old_space_used(G1CollectedHeap* g1h) {
return g1h->g1mm()->old_space_used();
}
G1EdenPool::G1EdenPool(G1CollectedHeap* g1h) :
G1MemoryPoolSuper(g1h,
"G1 Eden",
eden_space_committed(g1h), /* init_size */
"G1 Eden Space",
g1h->g1mm()->eden_space_committed(), /* init_size */
_undefined_max,
false /* support_usage_threshold */) { }
MemoryUsage G1EdenPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = eden_space_committed(_g1h);
size_t committed = _g1mm->eden_space_committed();
return MemoryUsage(initial_sz, used, committed, max_sz);
}
G1SurvivorPool::G1SurvivorPool(G1CollectedHeap* g1h) :
G1MemoryPoolSuper(g1h,
"G1 Survivor",
survivor_space_committed(g1h), /* init_size */
"G1 Survivor Space",
g1h->g1mm()->survivor_space_committed(), /* init_size */
_undefined_max,
false /* support_usage_threshold */) { }
MemoryUsage G1SurvivorPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = survivor_space_committed(_g1h);
size_t committed = _g1mm->survivor_space_committed();
return MemoryUsage(initial_sz, used, committed, max_sz);
}
......@@ -104,14 +77,15 @@ MemoryUsage G1SurvivorPool::get_memory_usage() {
G1OldGenPool::G1OldGenPool(G1CollectedHeap* g1h) :
G1MemoryPoolSuper(g1h,
"G1 Old Gen",
old_space_committed(g1h), /* init_size */
g1h->g1mm()->old_space_committed(), /* init_size */
_undefined_max,
true /* support_usage_threshold */) { }
MemoryUsage G1OldGenPool::get_memory_usage() {
size_t initial_sz = initial_size();
size_t max_sz = max_size();
size_t used = used_in_bytes();
size_t committed = old_space_committed(_g1h);
size_t committed = _g1mm->old_space_committed();
return MemoryUsage(initial_sz, used, committed, max_sz);
}
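Net effect: every pool now pulls its figures from G1MonitoringSupport, and max stays undefined because regions migrate between eden, survivor, and old as G1 runs. A sketch of what a caller observes (MemoryUsage accessor names per HotSpot's services/memoryUsage.hpp; pool construction assumed):

  G1EdenPool* eden = new G1EdenPool(g1h);
  MemoryUsage u = eden->get_memory_usage();
  // u.used() <= u.committed(); u.max_size() == (size_t)-1, i.e. undefined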
......@@ -26,12 +26,11 @@
#define SHARE_VM_SERVICES_G1MEMORYPOOL_HPP
#ifndef SERIALGC
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "services/memoryPool.hpp"
#include "services/memoryUsage.hpp"
#endif
class G1CollectedHeap;
// This file contains the three classes that represent the memory
// pools of the G1 spaces: G1EdenPool, G1SurvivorPool, and
// G1OldGenPool. In G1, unlike our other GCs, we do not have a
......@@ -50,37 +49,19 @@ class G1CollectedHeap;
// on this model.
//
// This class is shared by the three G1 memory pool classes
// (G1EdenPool, G1SurvivorPool, G1OldGenPool). Given that the way we
// calculate used / committed bytes for these three pools is related
// (see comment above), we put the calculations in this class so that
// we can easily share them among the subclasses.
// (G1EdenPool, G1SurvivorPool, G1OldGenPool).
class G1MemoryPoolSuper : public CollectedMemoryPool {
protected:
G1CollectedHeap* _g1h;
const static size_t _undefined_max = (size_t) -1;
G1MonitoringSupport* _g1mm;
// Would only be called from subclasses.
G1MemoryPoolSuper(G1CollectedHeap* g1h,
const char* name,
size_t init_size,
size_t max_size,
bool support_usage_threshold);
// The reason why all the code is in static methods is so that it
// can be safely called from the constructors of the subclasses.
static size_t undefined_max() {
return (size_t) -1;
}
static size_t eden_space_committed(G1CollectedHeap* g1h);
static size_t eden_space_used(G1CollectedHeap* g1h);
static size_t survivor_space_committed(G1CollectedHeap* g1h);
static size_t survivor_space_used(G1CollectedHeap* g1h);
static size_t old_space_committed(G1CollectedHeap* g1h);
static size_t old_space_used(G1CollectedHeap* g1h);
};
// Memory pool that represents the G1 eden.
......@@ -89,10 +70,10 @@ public:
G1EdenPool(G1CollectedHeap* g1h);
size_t used_in_bytes() {
return eden_space_used(_g1h);
return _g1mm->eden_space_used();
}
size_t max_size() const {
return undefined_max();
return _undefined_max;
}
MemoryUsage get_memory_usage();
};
......@@ -103,10 +84,10 @@ public:
G1SurvivorPool(G1CollectedHeap* g1h);
size_t used_in_bytes() {
return survivor_space_used(_g1h);
return _g1mm->survivor_space_used();
}
size_t max_size() const {
return undefined_max();
return _undefined_max;
}
MemoryUsage get_memory_usage();
};
......@@ -117,10 +98,10 @@ public:
G1OldGenPool(G1CollectedHeap* g1h);
size_t used_in_bytes() {
return old_space_used(_g1h);
return _g1mm->old_space_used();
}
size_t max_size() const {
return undefined_max();
return _undefined_max;
}
MemoryUsage get_memory_usage();
};
......
......@@ -54,16 +54,18 @@ static int test_even_odd_comparator(int a, int b) {
return 1;
}
static int test_stdlib_comparator(const void* a, const void* b) {
int ai = *(int*)a;
int bi = *(int*)b;
if (ai == bi) {
return 0;
}
if (ai < bi) {
return -1;
extern "C" {
static int test_stdlib_comparator(const void* a, const void* b) {
int ai = *(int*)a;
int bi = *(int*)b;
if (ai == bi) {
return 0;
}
if (ai < bi) {
return -1;
}
return 1;
}
return 1;
}
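The extern "C" wrapper is the point of this change: ::qsort is a C library function, so the comparator pointer it receives should have C language linkage, and conforming compilers may reject or warn about passing a C++-linkage function. Call sites are unchanged (sketch):

  int data[] = { 3, 1, 2 };
  ::qsort(data, 3, sizeof(int), test_stdlib_comparator);
  // data is now { 1, 2, 3 }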
void QuickSort::print_array(const char* prefix, int* array, int length) {
......@@ -92,7 +94,6 @@ bool QuickSort::sort_and_compare(int* arrayToSort, int* expectedResult, int leng
}
bool QuickSort::test_quick_sort() {
#if 0
tty->print_cr("test_quick_sort\n");
{
int* test_array = NULL;
......@@ -213,7 +214,6 @@ bool QuickSort::test_quick_sort() {
delete[] test_array;
delete[] expected_array;
}
#endif
return true;
}
......