Commit 5c096f95 authored by amurillo

Merge

Parents: a1a56cbc da2f5384
package sun.jvm.hotspot.gc_implementation.g1;
import java.util.Observable;
import java.util.Observer;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
public class G1Allocator extends VMObject {
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1Allocator");
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
}
public long getSummaryBytes() {
return summaryBytesUsedField.getValue(addr);
}
public G1Allocator(Address addr) {
super(addr);
}
}
......@@ -36,19 +36,18 @@ import sun.jvm.hotspot.memory.SpaceClosure;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObjectFactory;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for G1CollectedHeap.
public class G1CollectedHeap extends SharedHeap {
// HeapRegionSeq _seq;
static private long hrsFieldOffset;
// HeapRegionManager _hrm;
static private long hrmFieldOffset;
// MemRegion _g1_reserved;
static private long g1ReservedFieldOffset;
// size_t _summary_bytes_used;
static private CIntegerField summaryBytesUsedField;
// G1Allocator* _allocator
static private AddressField g1Allocator;
// G1MonitoringSupport* _g1mm;
static private AddressField g1mmField;
// HeapRegionSet _old_set;
......@@ -67,29 +66,29 @@ public class G1CollectedHeap extends SharedHeap {
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("G1CollectedHeap");
hrsFieldOffset = type.getField("_hrs").getOffset();
summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used");
hrmFieldOffset = type.getField("_hrm").getOffset();
g1Allocator = type.getAddressField("_allocator");
g1mmField = type.getAddressField("_g1mm");
oldSetFieldOffset = type.getField("_old_set").getOffset();
humongousSetFieldOffset = type.getField("_humongous_set").getOffset();
}
public long capacity() {
return hrs().capacity();
return hrm().capacity();
}
public long used() {
return summaryBytesUsedField.getValue(addr);
return allocator().getSummaryBytes();
}
public long n_regions() {
return hrs().length();
return hrm().length();
}
private HeapRegionSeq hrs() {
Address hrsAddr = addr.addOffsetTo(hrsFieldOffset);
return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class,
hrsAddr);
private HeapRegionManager hrm() {
Address hrmAddr = addr.addOffsetTo(hrmFieldOffset);
return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class,
hrmAddr);
}
public G1MonitoringSupport g1mm() {
......@@ -97,6 +96,11 @@ public class G1CollectedHeap extends SharedHeap {
return (G1MonitoringSupport) VMObjectFactory.newObject(G1MonitoringSupport.class, g1mmAddr);
}
public G1Allocator allocator() {
Address g1AllocatorAddr = g1Allocator.getValue(addr);
return (G1Allocator) VMObjectFactory.newObject(G1Allocator.class, g1AllocatorAddr);
}
public HeapRegionSetBase oldSet() {
Address oldSetAddr = addr.addOffsetTo(oldSetFieldOffset);
return (HeapRegionSetBase) VMObjectFactory.newObject(HeapRegionSetBase.class,
......@@ -110,7 +114,7 @@ public class G1CollectedHeap extends SharedHeap {
}
private Iterator<HeapRegion> heapRegionIterator() {
return hrs().heapRegionIterator();
return hrm().heapRegionIterator();
}
public void heapRegionIterate(SpaceClosure scl) {
......
/*
* Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -37,9 +37,9 @@ import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
// Mirror class for HeapRegionSeq. It essentially encapsulates the G1HeapRegionTable.
// Mirror class for HeapRegionManager.
public class HeapRegionSeq extends VMObject {
public class HeapRegionManager extends VMObject {
// G1HeapRegionTable _regions
static private long regionsFieldOffset;
// uint _committed_length
......@@ -54,7 +54,7 @@ public class HeapRegionSeq extends VMObject {
}
static private synchronized void initialize(TypeDataBase db) {
Type type = db.lookupType("HeapRegionSeq");
Type type = db.lookupType("HeapRegionManager");
regionsFieldOffset = type.getField("_regions").getOffset();
numCommittedField = type.getCIntegerField("_num_committed");
......@@ -82,7 +82,7 @@ public class HeapRegionSeq extends VMObject {
return regions().heapRegionIterator(length());
}
public HeapRegionSeq(Address addr) {
public HeapRegionManager(Address addr) {
super(addr);
}
}
......@@ -302,7 +302,7 @@ export_optimized:
export_product_jdk::
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
export_optimized_jdk::
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR) generic_export
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
export_fastdebug_jdk::
$(MAKE) BUILD_FLAVOR=$(@:export_%_jdk=%) ALT_EXPORT_PATH=$(JDK_IMAGE_DIR)/$(@:export_%_jdk=%) generic_export
export_debug_jdk::
......@@ -686,6 +686,19 @@ copy_debug_jdk::
($(CD) $(JDK_IMAGE_DIR)/debug && $(TAR) -xf -) ; \
fi
copy_optimized_jdk::
$(RM) -r $(JDK_IMAGE_DIR)/optimized
$(MKDIR) -p $(JDK_IMAGE_DIR)/optimized
if [ -d $(JDK_IMPORT_PATH)/optimized ] ; then \
($(CD) $(JDK_IMPORT_PATH)/optimized && \
$(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
else \
($(CD) $(JDK_IMPORT_PATH) && \
$(TAR) -cf - $(JDK_DIRS)) | \
($(CD) $(JDK_IMAGE_DIR)/optimized && $(TAR) -xf -) ; \
fi
#
# Check target
#
......
......@@ -21,6 +21,9 @@
# questions.
#
#
include $(GAMMADIR)/make/altsrc.make
ifeq ($(INCLUDE_JVMTI), false)
CXXFLAGS += -DINCLUDE_JVMTI=0
CFLAGS += -DINCLUDE_JVMTI=0
......@@ -70,19 +73,20 @@ ifeq ($(INCLUDE_CDS), false)
CXXFLAGS += -DINCLUDE_CDS=0
CFLAGS += -DINCLUDE_CDS=0
Src_Files_EXCLUDE += filemap.cpp metaspaceShared.cpp
Src_Files_EXCLUDE += filemap.cpp metaspaceShared*.cpp sharedPathsMiscInfo.cpp \
systemDictionaryShared.cpp classLoaderExt.cpp sharedClassUtil.cpp
endif
ifeq ($(INCLUDE_ALL_GCS), false)
CXXFLAGS += -DINCLUDE_ALL_GCS=0
CFLAGS += -DINCLUDE_ALL_GCS=0
gc_impl := $(GAMMADIR)/src/share/vm/gc_implementation
gc_exclude := \
$(notdir $(wildcard $(gc_impl)/concurrentMarkSweep/*.cpp)) \
$(notdir $(wildcard $(gc_impl)/g1/*.cpp)) \
$(notdir $(wildcard $(gc_impl)/parallelScavenge/*.cpp)) \
$(notdir $(wildcard $(gc_impl)/parNew/*.cpp))
gc_impl := $(HS_COMMON_SRC)/share/vm/gc_implementation
gc_impl_alt := $(HS_ALT_SRC)/share/vm/gc_implementation
gc_subdirs := concurrentMarkSweep g1 parallelScavenge parNew
gc_exclude := $(foreach gc,$(gc_subdirs), \
$(notdir $(wildcard $(gc_impl)/$(gc)/*.cpp)) \
$(notdir $(wildcard $(gc_impl_alt)/$(gc)/*.cpp)))
Src_Files_EXCLUDE += $(gc_exclude)
# Exclude everything in $(gc_impl)/shared except the files listed
......
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=40
HS_BUILD_NUMBER=08
HS_BUILD_NUMBER=09
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
......@@ -42,6 +42,9 @@ jprt_build_debugEmb:
jprt_build_fastdebugEmb:
$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_fastdebug
jprt_build_optimizedEmb:
$(MAKE) JAVASE_EMBEDDED=true MINIMIZE_RAM_USAGE=true jprt_build_optimized
jprt_build_productOpen:
$(MAKE) OPENJDK=true jprt_build_product
......@@ -51,6 +54,9 @@ jprt_build_debugOpen:
jprt_build_fastdebugOpen:
$(MAKE) OPENJDK=true jprt_build_fastdebug
jprt_build_optimizedOpen:
$(MAKE) OPENJDK=true jprt_build_optimized
jprt_build_product: all_product copy_product_jdk export_product_jdk
( $(CD) $(JDK_IMAGE_DIR) && \
$(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
......@@ -63,5 +69,9 @@ jprt_build_debug: all_debug copy_debug_jdk export_debug_jdk
( $(CD) $(JDK_IMAGE_DIR)/debug && \
$(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug
jprt_build_optimized: all_optimized copy_optimized_jdk export_optimized_jdk
( $(CD) $(JDK_IMAGE_DIR)/optimized && \
$(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
.PHONY: jprt_build_product jprt_build_fastdebug jprt_build_debug jprt_build_optimized
......@@ -113,13 +113,13 @@ jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
# Standard list of jprt build targets for this source tree
jprt.build.targets.standard= \
${jprt.my.solaris.sparcv9}-{product|fastdebug|optimized}, \
${jprt.my.solaris.sparcv9}-{product|fastdebug}, \
${jprt.my.solaris.x64}-{product|fastdebug}, \
${jprt.my.linux.i586}-{product|fastdebug}, \
${jprt.my.linux.x64}-{product|fastdebug|optimized}, \
${jprt.my.linux.x64}-{product|fastdebug}, \
${jprt.my.macosx.x64}-{product|fastdebug}, \
${jprt.my.windows.i586}-{product|fastdebug}, \
${jprt.my.windows.x64}-{product|fastdebug|optimized}, \
${jprt.my.windows.x64}-{product|fastdebug}, \
${jprt.my.linux.armvh}-{product|fastdebug}
jprt.build.targets.open= \
......
......@@ -612,6 +612,17 @@ void VM_Version::get_processor_features() {
#if INCLUDE_RTM_OPT
if (UseRTMLocking) {
if (is_intel_family_core()) {
if ((_model == CPU_MODEL_HASWELL_E3) ||
(_model == CPU_MODEL_HASWELL_E7 && _stepping < 3) ||
(_model == CPU_MODEL_BROADWELL && _stepping < 4)) {
if (!UnlockExperimentalVMOptions) {
vm_exit_during_initialization("UseRTMLocking is only available as experimental option on this platform. It must be enabled via -XX:+UnlockExperimentalVMOptions flag.");
} else {
warning("UseRTMLocking is only available as experimental option on this platform.");
}
}
}
if (!FLAG_IS_CMDLINE(UseRTMLocking)) {
// RTM locking should be used only for applications with
// high lock contention. For now we do not use it by default.
......
......@@ -276,7 +276,10 @@ protected:
CPU_MODEL_WESTMERE_EX = 0x2f,
CPU_MODEL_SANDYBRIDGE = 0x2a,
CPU_MODEL_SANDYBRIDGE_EP = 0x2d,
CPU_MODEL_IVYBRIDGE_EP = 0x3a
CPU_MODEL_IVYBRIDGE_EP = 0x3a,
CPU_MODEL_HASWELL_E3 = 0x3c,
CPU_MODEL_HASWELL_E7 = 0x3f,
CPU_MODEL_BROADWELL = 0x3d
} cpuExtendedFamily;
// cpuid information block. All info derived from executing cpuid with
......
......@@ -2244,7 +2244,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
const siginfo_t* si = (const siginfo_t*)siginfo;
os::Posix::print_siginfo_brief(st, si);
#if INCLUDE_CDS
if (si && (si->si_signo == SIGBUS || si->si_signo == SIGSEGV) &&
UseSharedSpaces) {
FileMapInfo* mapinfo = FileMapInfo::current_info();
......@@ -2254,6 +2254,7 @@ void os::print_siginfo(outputStream* st, void* siginfo) {
" possible disk/network problem.");
}
}
#endif
st->cr();
}
......
......@@ -31,6 +31,9 @@
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#if INCLUDE_CDS
#include "classfile/systemDictionaryShared.hpp"
#endif
#include "classfile/verificationType.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
......@@ -60,6 +63,7 @@
#include "services/threadService.hpp"
#include "utilities/array.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
// We generally try to create the oops directly when parsing, rather than
// allocating temporary data structures and copying the bytes twice. A
......@@ -3737,7 +3741,15 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
instanceKlassHandle nullHandle;
// Figure out whether we can skip format checking (matching classic VM behavior)
_need_verify = Verifier::should_verify_for(class_loader(), verify);
if (DumpSharedSpaces) {
// verify == true means it's a 'remote' class (i.e., non-boot class)
// Verification decision is based on BytecodeVerificationRemote flag
// for those classes.
_need_verify = (verify) ? BytecodeVerificationRemote :
BytecodeVerificationLocal;
} else {
_need_verify = Verifier::should_verify_for(class_loader(), verify);
}
// Set the verify flag in stream
cfs->set_verify(_need_verify);
......@@ -3756,6 +3768,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
u2 minor_version = cfs->get_u2_fast();
u2 major_version = cfs->get_u2_fast();
if (DumpSharedSpaces && major_version < JAVA_1_5_VERSION) {
ResourceMark rm;
warning("Pre JDK 1.5 class not supported by CDS: %u.%u %s",
major_version, minor_version, name->as_C_string());
Exceptions::fthrow(
THREAD_AND_LOCATION,
vmSymbols::java_lang_UnsupportedClassVersionError(),
"Unsupported major.minor version for dump time %u.%u",
major_version,
minor_version);
}
// Check version numbers - we check this even with verifier off
if (!is_supported_version(major_version, minor_version)) {
if (name == NULL) {
......@@ -3863,6 +3887,18 @@ instanceKlassHandle ClassFileParser::parseClassFile(Symbol* name,
if (cfs->source() != NULL) tty->print(" from %s", cfs->source());
tty->print_cr("]");
}
#if INCLUDE_CDS
if (DumpLoadedClassList != NULL && cfs->source() != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
if (name != NULL) {
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", name->as_C_string());
classlist_file->flush();
}
}
}
#endif
u2 super_class_index = cfs->get_u2_fast();
instanceKlassHandle super_klass = parse_super_class(super_class_index,
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -30,7 +30,7 @@ void ClassFileStream::truncated_file_error(TRAPS) {
THROW_MSG(vmSymbols::java_lang_ClassFormatError(), "Truncated class file");
}
ClassFileStream::ClassFileStream(u1* buffer, int length, char* source) {
ClassFileStream::ClassFileStream(u1* buffer, int length, const char* source) {
_buffer_start = buffer;
_buffer_end = buffer + length;
_current = buffer;
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -53,20 +53,20 @@ class ClassFileStream: public ResourceObj {
u1* _buffer_start; // Buffer bottom
u1* _buffer_end; // Buffer top (one past last element)
u1* _current; // Current buffer position
char* _source; // Source of stream (directory name, ZIP/JAR archive name)
const char* _source; // Source of stream (directory name, ZIP/JAR archive name)
bool _need_verify; // True if verification is on for the class file
void truncated_file_error(TRAPS);
public:
// Constructor
ClassFileStream(u1* buffer, int length, char* source);
ClassFileStream(u1* buffer, int length, const char* source);
// Buffer access
u1* buffer() const { return _buffer_start; }
int length() const { return _buffer_end - _buffer_start; }
u1* current() const { return _current; }
void set_current(u1* pos) { _current = pos; }
char* source() const { return _source; }
const char* source() const { return _source; }
void set_verify(bool flag) { _need_verify = flag; }
void check_truncated_file(bool b, TRAPS) {
......
......@@ -72,11 +72,11 @@ class ClassPathEntry: public CHeapObj<mtClass> {
class ClassPathDirEntry: public ClassPathEntry {
private:
char* _dir; // Name of directory
const char* _dir; // Name of directory
public:
bool is_jar_file() { return false; }
const char* name() { return _dir; }
ClassPathDirEntry(char* dir);
ClassPathDirEntry(const char* dir);
ClassFileStream* open_stream(const char* name, TRAPS);
// Debugging
NOT_PRODUCT(void compile_the_world(Handle loader, TRAPS);)
......@@ -100,13 +100,14 @@ typedef struct {
class ClassPathZipEntry: public ClassPathEntry {
private:
jzfile* _zip; // The zip archive
char* _zip_name; // Name of zip archive
jzfile* _zip; // The zip archive
const char* _zip_name; // Name of zip archive
public:
bool is_jar_file() { return true; }
const char* name() { return _zip_name; }
ClassPathZipEntry(jzfile* zip, const char* zip_name);
~ClassPathZipEntry();
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void contents_do(void f(const char* name, void* context), void* context);
// Debugging
......@@ -122,16 +123,18 @@ class ClassPathZipEntry: public ClassPathEntry {
// For lazier loading of boot class path entries
class LazyClassPathEntry: public ClassPathEntry {
private:
char* _path; // dir or file
const char* _path; // dir or file
struct stat _st;
MetaIndex* _meta_index;
bool _has_error;
bool _throw_exception;
volatile ClassPathEntry* _resolved_entry;
ClassPathEntry* resolve_entry(TRAPS);
public:
bool is_jar_file();
const char* name() { return _path; }
LazyClassPathEntry(char* path, const struct stat* st);
LazyClassPathEntry(const char* path, const struct stat* st, bool throw_exception);
u1* open_entry(const char* name, jint* filesize, bool nul_terminate, TRAPS);
ClassFileStream* open_stream(const char* name, TRAPS);
void set_meta_index(MetaIndex* meta_index) { _meta_index = meta_index; }
virtual bool is_lazy();
......@@ -142,6 +145,7 @@ class LazyClassPathEntry: public ClassPathEntry {
class PackageHashtable;
class PackageInfo;
class SharedPathsMiscInfo;
template <MEMFLAGS F> class HashtableBucket;
class ClassLoader: AllStatic {
......@@ -149,7 +153,7 @@ class ClassLoader: AllStatic {
enum SomeConstants {
package_hash_table_size = 31 // Number of buckets
};
private:
protected:
friend class LazyClassPathEntry;
// Performance counters
......@@ -191,10 +195,15 @@ class ClassLoader: AllStatic {
static ClassPathEntry* _first_entry;
// Last entry in linked list of ClassPathEntry instances
static ClassPathEntry* _last_entry;
static int _num_entries;
// Hash table used to keep track of loaded packages
static PackageHashtable* _package_hash_table;
static const char* _shared_archive;
// Info used by CDS
CDS_ONLY(static SharedPathsMiscInfo * _shared_paths_misc_info;)
// Hash function
static unsigned int hash(const char *s, int n);
// Returns the package file name corresponding to the specified package
......@@ -205,19 +214,23 @@ class ClassLoader: AllStatic {
static bool add_package(const char *pkgname, int classpath_index, TRAPS);
// Initialization
static void setup_meta_index();
static void setup_bootstrap_meta_index();
static void setup_meta_index(const char* meta_index_path, const char* meta_index_dir,
int start_index);
static void setup_bootstrap_search_path();
static void setup_search_path(const char *class_path);
static void load_zip_library();
static ClassPathEntry* create_class_path_entry(char *path, const struct stat* st,
bool lazy, TRAPS);
static ClassPathEntry* create_class_path_entry(const char *path, const struct stat* st,
bool lazy, bool throw_exception, TRAPS);
// Canonicalizes path names, so strcmp will work properly. This is mainly
// to avoid confusing the zip library
static bool get_canonical_path(char* orig, char* out, int len);
static bool get_canonical_path(const char* orig, char* out, int len);
public:
// Used by the kernel jvm.
static void update_class_path_entry_list(char *path,
bool check_for_duplicates);
static bool update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool throw_exception=true);
static void print_bootclasspath();
// Timing
......@@ -300,6 +313,7 @@ class ClassLoader: AllStatic {
// Initialization
static void initialize();
CDS_ONLY(static void initialize_shared_path();)
static void create_package_info_table();
static void create_package_info_table(HashtableBucket<mtClass> *t, int length,
int number_of_entries);
......@@ -314,10 +328,21 @@ class ClassLoader: AllStatic {
return e;
}
#if INCLUDE_CDS
// Sharing dump and restore
static void copy_package_info_buckets(char** top, char* end);
static void copy_package_info_table(char** top, char* end);
static void check_shared_classpath(const char *path);
static void finalize_shared_paths_misc_info();
static int get_shared_paths_misc_info_size();
static void* get_shared_paths_misc_info();
static bool check_shared_paths_misc_info(void* info, int size);
static void exit_with_path_failure(const char* error, const char* message);
#endif
static void trace_class_path(const char* msg, const char* name = NULL);
// VM monitoring and management support
static jlong classloader_time_ms();
static jlong class_method_total_size();
......@@ -341,7 +366,7 @@ class ClassLoader: AllStatic {
// Force compilation of all methods in all classes in bootstrap class path (stress test)
#ifndef PRODUCT
private:
protected:
static int _compile_the_world_class_counter;
static int _compile_the_world_method_counter;
public:
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#define SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
#include "classfile/classLoader.hpp"
class ClassLoaderExt: public ClassLoader { // AllStatic
public:
class Context {
const char* _file_name;
public:
Context(const char* class_name, const char* file_name, TRAPS) {
_file_name = file_name;
}
bool check(ClassFileStream* stream, const int classpath_index) {
return true;
}
bool should_verify(int classpath_index) {
return false;
}
instanceKlassHandle record_result(const int classpath_index,
ClassPathEntry* e, instanceKlassHandle result, TRAPS) {
if (ClassLoader::add_package(_file_name, classpath_index, THREAD)) {
if (DumpSharedSpaces) {
result->set_shared_classpath_index(classpath_index);
}
return result;
} else {
return instanceKlassHandle(); // NULL
}
}
};
static void add_class_path_entry(const char* path, bool check_for_duplicates,
ClassPathEntry* new_entry) {
ClassLoader::add_to_list(new_entry);
}
static void setup_search_paths() {}
};
#endif // SHARE_VM_CLASSFILE_CLASSLOADEREXT_HPP
......@@ -209,6 +209,29 @@ void Dictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
_pd_cache_table->roots_oops_do(strong, weak);
}
void Dictionary::remove_classes_in_error_state() {
assert(DumpSharedSpaces, "supported only when dumping");
DictionaryEntry* probe = NULL;
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry** p = bucket_addr(index); *p != NULL; ) {
probe = *p;
InstanceKlass* ik = InstanceKlass::cast(probe->klass());
if (ik->is_in_error_state()) { // purge this entry
*p = probe->next();
if (probe == _current_class_entry) {
_current_class_entry = NULL;
}
free_entry(probe);
ResourceMark rm;
tty->print_cr("Removed error class: %s", ik->external_name());
continue;
}
p = probe->next_addr();
}
}
}
void Dictionary::always_strong_oops_do(OopClosure* blk) {
// Follow all system classes and temporary placeholders in dictionary; only
// protection domain oops contain references into the heap. In a first
......@@ -682,16 +705,17 @@ void SymbolPropertyTable::methods_do(void f(Method*)) {
// ----------------------------------------------------------------------------
#ifndef PRODUCT
void Dictionary::print() {
void Dictionary::print(bool details) {
ResourceMark rm;
HandleMark hm;
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
if (details) {
tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
table_size(), number_of_entries());
tty->print_cr("^ indicates that initiating loader is different from "
"defining loader");
}
for (int index = 0; index < table_size(); index++) {
for (DictionaryEntry* probe = bucket(index);
......@@ -702,21 +726,28 @@ void Dictionary::print() {
ClassLoaderData* loader_data = probe->loader_data();
bool is_defining_class =
(loader_data == InstanceKlass::cast(e)->class_loader_data());
tty->print("%s%s", is_defining_class ? " " : "^",
tty->print("%s%s", ((!details) || is_defining_class) ? " " : "^",
e->external_name());
if (details) {
tty->print(", loader ");
loader_data->print_value();
if (loader_data != NULL) {
loader_data->print_value();
} else {
tty->print("NULL");
}
}
tty->cr();
}
}
tty->cr();
_pd_cache_table->print();
if (details) {
tty->cr();
_pd_cache_table->print();
}
tty->cr();
}
#endif
void Dictionary::verify() {
guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
......
......@@ -100,6 +100,7 @@ public:
void methods_do(void f(Method*));
void unlink(BoolObjectClosure* is_alive);
void remove_classes_in_error_state();
// Classes loaded by the bootstrap loader are always strongly reachable.
// If we're not doing class unloading, all classes are strongly reachable.
......@@ -126,9 +127,7 @@ public:
ProtectionDomainCacheEntry* cache_get(oop protection_domain);
#ifndef PRODUCT
void print();
#endif
void print(bool details = true);
void verify();
};
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
#define SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/filemap.hpp"
class SharedClassUtil : AllStatic {
public:
static SharedPathsMiscInfo* allocate_shared_paths_misc_info() {
return new SharedPathsMiscInfo();
}
static SharedPathsMiscInfo* allocate_shared_paths_misc_info(char* buf, int size) {
return new SharedPathsMiscInfo(buf, size);
}
static FileMapInfo::FileMapHeader* allocate_file_map_header() {
return new FileMapInfo::FileMapHeader();
}
static size_t file_map_header_size() {
return sizeof(FileMapInfo::FileMapHeader);
}
static size_t shared_class_path_entry_size() {
return sizeof(SharedClassPathEntry);
}
static void update_shared_classpath(ClassPathEntry *cpe,
SharedClassPathEntry* ent,
time_t timestamp,
long filesize, TRAPS) {
ent->_timestamp = timestamp;
ent->_filesize = filesize;
}
static void initialize(TRAPS) {}
inline static bool is_shared_boot_class(Klass* klass) {
return (klass->_shared_class_path_index >= 0);
}
};
#endif // SHARE_VM_CLASSFILE_SHAREDCLASSUTIL_HPP
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/sharedPathsMiscInfo.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/metaspaceShared.hpp"
#include "runtime/arguments.hpp"
void SharedPathsMiscInfo::add_path(const char* path, int type) {
if (TraceClassPaths) {
tty->print("[type=%s] ", type_name(type));
trace_class_path("[Add misc shared path ", path);
}
write(path, strlen(path) + 1);
write_jint(jint(type));
}
void SharedPathsMiscInfo::ensure_size(size_t needed_bytes) {
assert(_allocated, "cannot modify buffer during validation.");
int used = get_used_bytes();
int target = used + int(needed_bytes);
if (target > _buf_size) {
_buf_size = _buf_size * 2 + (int)needed_bytes;
_buf_start = REALLOC_C_HEAP_ARRAY(char, _buf_start, _buf_size, mtClass);
_cur_ptr = _buf_start + used;
_end_ptr = _buf_start + _buf_size;
}
}
void SharedPathsMiscInfo::write(const void* ptr, size_t size) {
ensure_size(size);
memcpy(_cur_ptr, ptr, size);
_cur_ptr += size;
}
bool SharedPathsMiscInfo::read(void* ptr, size_t size) {
if (_cur_ptr + size <= _end_ptr) {
memcpy(ptr, _cur_ptr, size);
_cur_ptr += size;
return true;
}
return false;
}
bool SharedPathsMiscInfo::fail(const char* msg, const char* name) {
ClassLoader::trace_class_path(msg, name);
MetaspaceShared::set_archive_loading_failed();
return false;
}
bool SharedPathsMiscInfo::check() {
// The whole buffer must be 0 terminated so that we can use strlen and strcmp
// without fear.
_end_ptr -= sizeof(jint);
if (_cur_ptr >= _end_ptr) {
return fail("Truncated archive file header");
}
if (*_end_ptr != 0) {
return fail("Corrupted archive file header");
}
while (_cur_ptr < _end_ptr) {
jint type;
const char* path = _cur_ptr;
_cur_ptr += strlen(path) + 1;
if (!read_jint(&type)) {
return fail("Corrupted archive file header");
}
if (TraceClassPaths) {
tty->print("[type=%s ", type_name(type));
print_path(tty, type, path);
tty->print_cr("]");
}
if (!check(type, path)) {
if (!PrintSharedArchiveAndExit) {
return false;
}
} else {
trace_class_path("[ok");
}
}
return true;
}
bool SharedPathsMiscInfo::check(jint type, const char* path) {
switch (type) {
case BOOT:
if (strcmp(path, Arguments::get_sysclasspath()) != 0) {
return fail("[BOOT classpath mismatch, actual: -Dsun.boot.class.path=", Arguments::get_sysclasspath());
}
break;
case NON_EXIST: // fall-through
case REQUIRED:
{
struct stat st;
if (os::stat(path, &st) != 0) {
// The file does not actually exist
if (type == REQUIRED) {
// but we require it to exist -> fail
return fail("Required file doesn't exist");
}
} else {
// The file actually exists
if (type == NON_EXIST) {
// But we want it to not exist -> fail
return fail("File must not exist");
}
time_t timestamp;
long filesize;
if (!read_time(&timestamp) || !read_long(&filesize)) {
return fail("Corrupted archive file header");
}
if (timestamp != st.st_mtime) {
return fail("Timestamp mismatch");
}
if (filesize != st.st_size) {
return fail("File size mismatch");
}
}
}
break;
default:
return fail("Corrupted archive file header");
}
return true;
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
#define SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
#include "runtime/os.hpp"
// During dumping time, when processing class paths, we build up the dump-time
// classpath. The JAR files that exist are stored in the list ClassLoader::_first_entry.
// However, we need to store other "misc" information for run-time checking, such as
//
// + The values of Arguments::get_sysclasspath() used during dumping.
//
// + The meta-index file(s) used during dumping (incl modification time and size)
//
// + The class path elements that were specified during dumping but did not exist --
// these elements must also be specified at run time, and they also must not
// exist at run time.
//
// These misc items are stored in a linear buffer in SharedPathsMiscInfo.
// The storage format is stream oriented to minimize its size.
//
// When writing the information to the archive file, SharedPathsMiscInfo is stored in
// the archive file header. At run-time, this information is used only during initialization
// (accessed using read() instead of mmap()), and is deallocated afterwards to save space.
//
// The SharedPathsMiscInfo class is used for both creating the information (during
// dumping time) and validation (at run time). Different constructors are used in the
// two situations. See below.
class SharedPathsMiscInfo : public CHeapObj<mtClass> {
protected:
char* _buf_start;
char* _cur_ptr;
char* _end_ptr;
int _buf_size;
bool _allocated; // was _buf_start allocated by me?
void ensure_size(size_t needed_bytes);
void add_path(const char* path, int type);
void write(const void* ptr, size_t size);
bool read(void* ptr, size_t size);
static void trace_class_path(const char* msg, const char* name = NULL) {
ClassLoader::trace_class_path(msg, name);
}
protected:
static bool fail(const char* msg, const char* name = NULL);
virtual bool check(jint type, const char* path);
public:
enum {
INITIAL_BUF_SIZE = 128
};
// This constructor is used when creating the misc information (during dump)
SharedPathsMiscInfo() {
_buf_size = INITIAL_BUF_SIZE;
_cur_ptr = _buf_start = NEW_C_HEAP_ARRAY(char, _buf_size, mtClass);
_allocated = true;
}
// This constructor is used when validating the misc info (during run time)
SharedPathsMiscInfo(char *buff, int size) {
_cur_ptr = _buf_start = buff;
_end_ptr = _buf_start + size;
_buf_size = size;
_allocated = false;
}
~SharedPathsMiscInfo() {
if (_allocated) {
FREE_C_HEAP_ARRAY(char, _buf_start, mtClass);
}
}
int get_used_bytes() {
return _cur_ptr - _buf_start;
}
void* buffer() {
return _buf_start;
}
// writing --
// The path must not exist at run-time
void add_nonexist_path(const char* path) {
add_path(path, NON_EXIST);
}
// The path must exist and have required size and modification time
void add_required_file(const char* path) {
add_path(path, REQUIRED);
struct stat st;
if (os::stat(path, &st) != 0) {
assert(0, "sanity");
ClassLoader::exit_with_path_failure("failed to os::stat(%s)", path); // should not happen
}
write_time(st.st_mtime);
write_long(st.st_size);
}
// The path must exist, and must contain exactly <num_entries> files/dirs
void add_boot_classpath(const char* path) {
add_path(path, BOOT);
}
int write_jint(jint num) {
write(&num, sizeof(num));
return 0;
}
void write_time(time_t t) {
write(&t, sizeof(t));
}
void write_long(long l) {
write(&l, sizeof(l));
}
bool dump_to_file(int fd) {
int n = get_used_bytes();
return (os::write(fd, _buf_start, n) == (size_t)n);
}
// reading --
enum {
BOOT = 1,
NON_EXIST = 2,
REQUIRED = 3
};
virtual const char* type_name(int type) {
switch (type) {
case BOOT: return "BOOT";
case NON_EXIST: return "NON_EXIST";
case REQUIRED: return "REQUIRED";
default: ShouldNotReachHere(); return "?";
}
}
virtual void print_path(outputStream* out, int type, const char* path) {
switch (type) {
case BOOT:
out->print("Expecting -Dsun.boot.class.path=%s", path);
break;
case NON_EXIST:
out->print("Expecting that %s does not exist", path);
break;
case REQUIRED:
out->print("Expecting that file %s must exist and is not altered", path);
break;
default:
ShouldNotReachHere();
}
}
bool check();
bool read_jint(jint *ptr) {
return read(ptr, sizeof(jint));
}
bool read_long(long *ptr) {
return read(ptr, sizeof(long));
}
bool read_time(time_t *ptr) {
return read(ptr, sizeof(time_t));
}
};
#endif // SHARE_VM_CLASSFILE_SHAREDPATHSMISCINFO_HPP
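The split between the two constructors above can be illustrated with a short sketch. This is a hypothetical usage outline, not code from the commit: the function names, the archive_fd parameter, and the literal paths are invented for illustration, and it calls only methods declared in this header (it would still need the surrounding HotSpot headers to compile).
// Dump time: the allocating constructor grows a C-heap buffer as records
// are appended, and the finished buffer is written into the archive header.
static void dump_time_sketch(int archive_fd) {            // archive_fd: hypothetical
  SharedPathsMiscInfo* info = new SharedPathsMiscInfo();
  info->add_boot_classpath("/jdk/jre/lib/rt.jar");         // BOOT: sysclasspath value
  info->add_required_file("/jdk/jre/lib/meta-index");      // REQUIRED: also records mtime/size
  info->add_nonexist_path("/app/patch.jar");               // NON_EXIST: must stay absent at run time
  info->dump_to_file(archive_fd);                          // writes get_used_bytes() bytes
  delete info;                                             // dtor frees _buf_start (_allocated == true)
}
// Run time: the validating constructor wraps bytes already read back from the
// archive header (no copy, no allocation), and check() replays every record
// against the current VM environment; any mismatch rejects the archive.
static bool run_time_sketch(char* header_bytes, int size) {
  SharedPathsMiscInfo info(header_bytes, size);
  return info.check();
}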
......@@ -30,10 +30,15 @@
#include "classfile/placeholders.hpp"
#include "classfile/resolutionErrors.hpp"
#include "classfile/systemDictionary.hpp"
#if INCLUDE_CDS
#include "classfile/sharedClassUtil.hpp"
#include "classfile/systemDictionaryShared.hpp"
#endif
#include "classfile/vmSymbols.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/filemap.hpp"
#include "memory/gcLocker.hpp"
#include "memory/oopFactory.hpp"
#include "oops/instanceKlass.hpp"
......@@ -109,6 +114,8 @@ void SystemDictionary::compute_java_system_loader(TRAPS) {
CHECK);
_java_system_loader = (oop)result.get_jobject();
CDS_ONLY(SystemDictionaryShared::initialize(CHECK);)
}
......@@ -974,6 +981,7 @@ Klass* SystemDictionary::parse_stream(Symbol* class_name,
// as the host_klass
assert(EnableInvokeDynamic, "");
guarantee(host_klass->class_loader() == class_loader(), "should be the same");
guarantee(!DumpSharedSpaces, "must not create anonymous classes when dumping");
loader_data = ClassLoaderData::anonymous_class_loader_data(class_loader(), CHECK_NULL);
loader_data->record_dependency(host_klass(), CHECK_NULL);
} else {
......@@ -1135,7 +1143,7 @@ Klass* SystemDictionary::resolve_from_stream(Symbol* class_name,
return k();
}
#if INCLUDE_CDS
void SystemDictionary::set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries) {
assert(length == _nof_buckets * sizeof(HashtableBucket<mtClass>),
......@@ -1168,15 +1176,21 @@ Klass* SystemDictionary::find_shared_class(Symbol* class_name) {
instanceKlassHandle SystemDictionary::load_shared_class(
Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle ik (THREAD, find_shared_class(class_name));
return load_shared_class(ik, class_loader, THREAD);
// Make sure we only return the boot class for the NULL classloader.
if (ik.not_null() &&
SharedClassUtil::is_shared_boot_class(ik()) && class_loader.is_null()) {
Handle protection_domain;
return load_shared_class(ik, class_loader, protection_domain, THREAD);
}
return instanceKlassHandle();
}
instanceKlassHandle SystemDictionary::load_shared_class(
instanceKlassHandle ik, Handle class_loader, TRAPS) {
assert(class_loader.is_null(), "non-null classloader for shared class?");
instanceKlassHandle SystemDictionary::load_shared_class(instanceKlassHandle ik,
Handle class_loader,
Handle protection_domain, TRAPS) {
if (ik.not_null()) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
Symbol* class_name = ik->name();
Symbol* class_name = ik->name();
// Found the class, now load the superclass and interfaces. If they
// are shared, add them to the main system dictionary and reset
......@@ -1185,7 +1199,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(
if (ik->super() != NULL) {
Symbol* cn = ik->super()->name();
resolve_super_or_fail(class_name, cn,
class_loader, Handle(), true, CHECK_(nh));
class_loader, protection_domain, true, CHECK_(nh));
}
Array<Klass*>* interfaces = ik->local_interfaces();
......@@ -1198,7 +1212,7 @@ instanceKlassHandle SystemDictionary::load_shared_class(
// reinitialized yet (they will be once the interface classes
// are loaded)
Symbol* name = k->name();
resolve_super_or_fail(class_name, name, class_loader, Handle(), false, CHECK_(nh));
resolve_super_or_fail(class_name, name, class_loader, protection_domain, false, CHECK_(nh));
}
// Adjust methods to recover missing data. They need addresses for
......@@ -1207,30 +1221,45 @@ instanceKlassHandle SystemDictionary::load_shared_class(
// Updating methods must be done under a lock so multiple
// threads don't update these in parallel
// Shared classes are all currently loaded by the bootstrap
// classloader, so this will never cause a deadlock on
// a custom class loader lock.
//
// Shared classes are all currently loaded by either the bootstrap or
// internal parallel class loaders, so this will never cause a deadlock
// on a custom class loader lock.
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
{
Handle lockObject = compute_loader_lock_object(class_loader, THREAD);
check_loader_lock_contention(lockObject, THREAD);
ObjectLocker ol(lockObject, THREAD, true);
ik->restore_unshareable_info(CHECK_(nh));
ik->restore_unshareable_info(loader_data, protection_domain, CHECK_(nh));
}
if (TraceClassLoading) {
ResourceMark rm;
tty->print("[Loaded %s", ik->external_name());
tty->print(" from shared objects file");
if (class_loader.not_null()) {
tty->print(" by %s", loader_data->loader_name());
}
tty->print_cr("]");
}
if (DumpLoadedClassList != NULL && classlist_file->is_open()) {
// Only dump the classes that can be stored into CDS archive
if (SystemDictionaryShared::is_sharing_possible(loader_data)) {
ResourceMark rm(THREAD);
classlist_file->print_cr("%s", ik->name()->as_C_string());
classlist_file->flush();
}
}
// notify a class loaded from shared object
ClassLoadingService::notify_class_loaded(InstanceKlass::cast(ik()),
true /* shared class */);
}
return ik;
}
#endif // INCLUDE_CDS
instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Handle class_loader, TRAPS) {
instanceKlassHandle nh = instanceKlassHandle(); // null Handle
......@@ -1240,8 +1269,10 @@ instanceKlassHandle SystemDictionary::load_instance_class(Symbol* class_name, Ha
// shared spaces.
instanceKlassHandle k;
{
#if INCLUDE_CDS
PerfTraceTime vmtimer(ClassLoader::perf_shared_classload_time());
k = load_shared_class(class_name, class_loader, THREAD);
#endif
}
if (k.is_null()) {
......@@ -1600,7 +1631,6 @@ void SystemDictionary::add_to_hierarchy(instanceKlassHandle k, TRAPS) {
Universe::flush_dependents_on(k);
}
// ----------------------------------------------------------------------------
// GC support
......@@ -1682,6 +1712,7 @@ bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
strong->do_oop(&_java_system_loader);
strong->do_oop(&_system_loader_lock_obj);
CDS_ONLY(SystemDictionaryShared::roots_oops_do(strong);)
// Adjust dictionary
dictionary()->roots_oops_do(strong, weak);
......@@ -1693,6 +1724,7 @@ void SystemDictionary::roots_oops_do(OopClosure* strong, OopClosure* weak) {
void SystemDictionary::oops_do(OopClosure* f) {
f->do_oop(&_java_system_loader);
f->do_oop(&_system_loader_lock_obj);
CDS_ONLY(SystemDictionaryShared::oops_do(f);)
// Adjust dictionary
dictionary()->oops_do(f);
......@@ -1754,6 +1786,10 @@ void SystemDictionary::methods_do(void f(Method*)) {
invoke_method_table()->methods_do(f);
}
void SystemDictionary::remove_classes_in_error_state() {
dictionary()->remove_classes_in_error_state();
}
// ----------------------------------------------------------------------------
// Lazily load klasses
......@@ -2566,10 +2602,12 @@ int SystemDictionary::number_of_classes() {
// ----------------------------------------------------------------------------
#ifndef PRODUCT
void SystemDictionary::print_shared(bool details) {
shared_dictionary()->print(details);
}
void SystemDictionary::print() {
dictionary()->print();
void SystemDictionary::print(bool details) {
dictionary()->print(details);
// Placeholders
GCMutexLocker mu(SystemDictionary_lock);
......@@ -2579,7 +2617,6 @@ void SystemDictionary::print() {
constraints()->print();
}
#endif
void SystemDictionary::verify() {
guarantee(dictionary() != NULL, "Verify of system dictionary failed");
......
......@@ -111,6 +111,7 @@ class Ticks;
do_klass(SecurityManager_klass, java_lang_SecurityManager, Pre ) \
do_klass(ProtectionDomain_klass, java_security_ProtectionDomain, Pre ) \
do_klass(AccessControlContext_klass, java_security_AccessControlContext, Pre ) \
do_klass(SecureClassLoader_klass, java_security_SecureClassLoader, Pre ) \
do_klass(ClassNotFoundException_klass, java_lang_ClassNotFoundException, Pre ) \
do_klass(NoClassDefFoundError_klass, java_lang_NoClassDefFoundError, Pre ) \
do_klass(LinkageError_klass, java_lang_LinkageError, Pre ) \
......@@ -167,6 +168,15 @@ class Ticks;
do_klass(StringBuilder_klass, java_lang_StringBuilder, Pre ) \
do_klass(misc_Unsafe_klass, sun_misc_Unsafe, Pre ) \
\
/* support for CDS */ \
do_klass(ByteArrayInputStream_klass, java_io_ByteArrayInputStream, Pre ) \
do_klass(File_klass, java_io_File, Pre ) \
do_klass(URLClassLoader_klass, java_net_URLClassLoader, Pre ) \
do_klass(URL_klass, java_net_URL, Pre ) \
do_klass(Jar_Manifest_klass, java_util_jar_Manifest, Pre ) \
do_klass(sun_misc_Launcher_klass, sun_misc_Launcher, Pre ) \
do_klass(CodeSource_klass, java_security_CodeSource, Pre ) \
\
/* It's NULL in non-1.4 JDKs. */ \
do_klass(StackTraceElement_klass, java_lang_StackTraceElement, Opt ) \
/* Universe::is_gte_jdk14x_version() is not set up by this point. */ \
......@@ -226,7 +236,7 @@ class SystemDictionary : AllStatic {
static Klass* resolve_or_fail(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, TRAPS);
// Convenient call for null loader and protection domain.
static Klass* resolve_or_fail(Symbol* class_name, bool throw_error, TRAPS);
private:
protected:
// handle error translation for resolve_or_null results
static Klass* handle_resolution_exception(Symbol* class_name, Handle class_loader, Handle protection_domain, bool throw_error, KlassHandle klass_h, TRAPS);
......@@ -331,6 +341,9 @@ public:
// loaders. Returns "true" iff something was unloaded.
static bool do_unloading(BoolObjectClosure* is_alive);
// Used by DumpSharedSpaces only to remove classes that failed verification
static void remove_classes_in_error_state();
static int calculate_systemdictionary_size(int loadedclasses);
// Applies "f->do_oop" to all root oops in the system dictionary.
......@@ -340,7 +353,7 @@ public:
// System loader lock
static oop system_loader_lock() { return _system_loader_lock_obj; }
private:
protected:
// Extended Redefine classes support (tbi)
static void preloaded_classes_do(KlassClosure* f);
static void lazily_loaded_classes_do(KlassClosure* f);
......@@ -353,7 +366,8 @@ public:
static void set_shared_dictionary(HashtableBucket<mtClass>* t, int length,
int number_of_entries);
// Printing
static void print() PRODUCT_RETURN;
static void print(bool details = true);
static void print_shared(bool details = true);
static void print_class_statistics() PRODUCT_RETURN;
static void print_method_statistics() PRODUCT_RETURN;
......@@ -439,7 +453,7 @@ public:
static void load_abstract_ownable_synchronizer_klass(TRAPS);
private:
protected:
// Tells whether ClassLoader.loadClassInternal is present
static bool has_loadClassInternal() { return _has_loadClassInternal; }
......@@ -467,7 +481,7 @@ public:
// Register a new class loader
static ClassLoaderData* register_loader(Handle class_loader, TRAPS);
private:
protected:
// Mirrors for primitive classes (created eagerly)
static oop check_mirror(oop m) {
assert(m != NULL, "mirror not initialized");
......@@ -536,7 +550,7 @@ public:
static void delete_resolution_error(ConstantPool* pool);
static Symbol* find_resolution_error(constantPoolHandle pool, int which);
private:
protected:
enum Constants {
_loader_constraint_size = 107, // number of entries in constraint table
......@@ -587,7 +601,7 @@ public:
friend class CounterDecay;
static Klass* try_get_next_class();
private:
protected:
static void validate_protection_domain(instanceKlassHandle klass,
Handle class_loader,
Handle protection_domain, TRAPS);
......@@ -614,10 +628,10 @@ private:
static instanceKlassHandle find_or_define_instance_class(Symbol* class_name,
Handle class_loader,
instanceKlassHandle k, TRAPS);
static instanceKlassHandle load_shared_class(Symbol* class_name,
Handle class_loader, TRAPS);
static instanceKlassHandle load_shared_class(instanceKlassHandle ik,
Handle class_loader, TRAPS);
Handle class_loader,
Handle protection_domain,
TRAPS);
static instanceKlassHandle load_instance_class(Symbol* class_name, Handle class_loader, TRAPS);
static Handle compute_loader_lock_object(Handle class_loader, TRAPS);
static void check_loader_lock_contention(Handle loader_lock, TRAPS);
......@@ -625,9 +639,12 @@ private:
static bool is_parallelDefine(Handle class_loader);
public:
static instanceKlassHandle load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS);
static bool is_ext_class_loader(Handle class_loader);
private:
protected:
static Klass* find_shared_class(Symbol* class_name);
// Setup link to hierarchy
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#define SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
#include "classfile/systemDictionary.hpp"
class SystemDictionaryShared: public SystemDictionary {
public:
static void initialize(TRAPS) {}
static instanceKlassHandle find_or_load_shared_class(Symbol* class_name,
Handle class_loader,
TRAPS) {
return instanceKlassHandle();
}
static void roots_oops_do(OopClosure* blk) {}
static void oops_do(OopClosure* f) {}
static bool is_sharing_possible(ClassLoaderData* loader_data) {
oop class_loader = loader_data->class_loader();
return (class_loader == NULL);
}
};
#endif // SHARE_VM_CLASSFILE_SYSTEMDICTIONARYSHARED_HPP
......@@ -91,11 +91,17 @@
template(java_lang_CharSequence, "java/lang/CharSequence") \
template(java_lang_SecurityManager, "java/lang/SecurityManager") \
template(java_security_AccessControlContext, "java/security/AccessControlContext") \
template(java_security_CodeSource, "java/security/CodeSource") \
template(java_security_ProtectionDomain, "java/security/ProtectionDomain") \
template(java_security_SecureClassLoader, "java/security/SecureClassLoader") \
template(java_net_URLClassLoader, "java/net/URLClassLoader") \
template(java_net_URL, "java/net/URL") \
template(java_util_jar_Manifest, "java/util/jar/Manifest") \
template(impliesCreateAccessControlContext_name, "impliesCreateAccessControlContext") \
template(java_io_OutputStream, "java/io/OutputStream") \
template(java_io_Reader, "java/io/Reader") \
template(java_io_BufferedReader, "java/io/BufferedReader") \
template(java_io_File, "java/io/File") \
template(java_io_FileInputStream, "java/io/FileInputStream") \
template(java_io_ByteArrayInputStream, "java/io/ByteArrayInputStream") \
template(java_io_Serializable, "java/io/Serializable") \
......@@ -106,6 +112,7 @@
template(java_util_Hashtable, "java/util/Hashtable") \
template(java_lang_Compiler, "java/lang/Compiler") \
template(sun_misc_Signal, "sun/misc/Signal") \
template(sun_misc_Launcher, "sun/misc/Launcher") \
template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
template(getBootClassPathEntryForClass_name, "getBootClassPathEntryForClass") \
template(sun_misc_PostVMInitHook, "sun/misc/PostVMInitHook") \
......@@ -397,6 +404,14 @@
template(signers_name, "signers_name") \
template(loader_data_name, "loader_data") \
template(dependencies_name, "dependencies") \
template(input_stream_void_signature, "(Ljava/io/InputStream;)V") \
template(getFileURL_name, "getFileURL") \
template(getFileURL_signature, "(Ljava/io/File;)Ljava/net/URL;") \
template(definePackageInternal_name, "definePackageInternal") \
template(definePackageInternal_signature, "(Ljava/lang/String;Ljava/util/jar/Manifest;Ljava/net/URL;)V") \
template(getProtectionDomain_name, "getProtectionDomain") \
template(getProtectionDomain_signature, "(Ljava/security/CodeSource;)Ljava/security/ProtectionDomain;") \
template(url_code_signer_array_void_signature, "(Ljava/net/URL;[Ljava/security/CodeSigner;)V") \
\
/* non-intrinsic name/signature pairs: */ \
template(register_method_name, "register") \
......
......@@ -34,8 +34,8 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/shared/vmGCOperations.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
......@@ -433,10 +433,6 @@ void CMMarkStack::oops_do(OopClosure* f) {
}
}
bool ConcurrentMark::not_yet_marked(oop obj) const {
return _g1h->is_obj_ill(obj);
}
CMRootRegions::CMRootRegions() :
_young_list(NULL), _cm(NULL), _scan_in_progress(false),
_should_abort(false), _next_survivor(NULL) { }
......@@ -891,7 +887,16 @@ class CheckBitmapClearHRClosure : public HeapRegionClosure {
}
virtual bool doHeapRegion(HeapRegion* r) {
return _bitmap->getNextMarkedWordAddress(r->bottom(), r->end()) != r->end();
// This closure can be called concurrently to the mutator, so we must make sure
// that the result of the getNextMarkedWordAddress() call is compared to the
// value passed to it as limit to detect any found bits.
// We can use the region's orig_end() for the limit and the comparison value
// as it always contains the "real" end of the region that never changes and
// has no side effects.
// Due to the latter, there can also be no problem with the compiler generating
// reloads of the orig_end() call.
HeapWord* end = r->orig_end();
return _bitmap->getNextMarkedWordAddress(r->bottom(), end) != end;
}
};
......@@ -1116,20 +1121,17 @@ public:
if (!_cm->has_aborted()) {
do {
double start_vtime_sec = os::elapsedVTime();
double start_time_sec = os::elapsedTime();
double mark_step_duration_ms = G1ConcMarkStepDurationMillis;
the_task->do_marking_step(mark_step_duration_ms,
true /* do_termination */,
false /* is_serial*/);
double end_time_sec = os::elapsedTime();
double end_vtime_sec = os::elapsedVTime();
double elapsed_vtime_sec = end_vtime_sec - start_vtime_sec;
double elapsed_time_sec = end_time_sec - start_time_sec;
_cm->clear_has_overflown();
bool ret = _cm->do_yield_check(worker_id);
_cm->do_yield_check(worker_id);
jlong sleep_time_ms;
if (!_cm->has_aborted() && the_task->has_aborted()) {
......@@ -1139,17 +1141,6 @@ public:
os::sleep(Thread::current(), sleep_time_ms, false);
SuspendibleThreadSet::join();
}
double end_time2_sec = os::elapsedTime();
double elapsed_time2_sec = end_time2_sec - start_time_sec;
#if 0
gclog_or_tty->print_cr("CM: elapsed %1.4lf ms, sleep %1.4lf ms, "
"overhead %1.4lf",
elapsed_vtime_sec * 1000.0, (double) sleep_time_ms,
the_task->conc_overhead(os::elapsedTime()) * 8.0);
gclog_or_tty->print_cr("elapsed time %1.4lf ms, time 2: %1.4lf ms",
elapsed_time_sec * 1000.0, elapsed_time2_sec * 1000.0);
#endif
} while (!_cm->has_aborted() && the_task->has_aborted());
}
the_task->record_end_time();
......@@ -1408,7 +1399,7 @@ protected:
void set_bit_for_region(HeapRegion* hr) {
assert(!hr->continuesHumongous(), "should have filtered those out");
BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
if (!hr->startsHumongous()) {
// Normal (non-humongous) case: just set the bit.
_region_bm->par_at_put(index, true);
......@@ -1596,7 +1587,7 @@ public:
if (_verbose) {
gclog_or_tty->print_cr("Region %u: marked bytes mismatch: "
"expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT,
hr->hrs_index(), exp_marked_bytes, act_marked_bytes);
hr->hrm_index(), exp_marked_bytes, act_marked_bytes);
}
failures += 1;
}
......@@ -1605,7 +1596,7 @@ public:
// (which was just calculated) region bit maps.
// We're not OK if the bit in the calculated expected region
// bitmap is set and the bit in the actual region bitmap is not.
BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index();
BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index();
bool expected = _exp_region_bm->at(index);
bool actual = _region_bm->at(index);
......@@ -1613,7 +1604,7 @@ public:
if (_verbose) {
gclog_or_tty->print_cr("Region %u: region bitmap mismatch: "
"expected: %s, actual: %s",
hr->hrs_index(),
hr->hrm_index(),
BOOL_TO_STR(expected), BOOL_TO_STR(actual));
}
failures += 1;
......@@ -1634,7 +1625,7 @@ public:
if (_verbose) {
gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": "
"expected: %s, actual: %s",
hr->hrs_index(), i,
hr->hrm_index(), i,
BOOL_TO_STR(expected), BOOL_TO_STR(actual));
}
failures += 1;
......@@ -2947,11 +2938,6 @@ void ConcurrentMark::clearRangeNextBitmap(MemRegion mr) {
_nextMarkBitMap->clearRange(mr);
}
void ConcurrentMark::clearRangeBothBitmaps(MemRegion mr) {
clearRangePrevBitmap(mr);
clearRangeNextBitmap(mr);
}
HeapRegion*
ConcurrentMark::claim_region(uint worker_id) {
// "checkpoint" the finger
......@@ -3254,7 +3240,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
assert(limit_idx <= end_idx, "or else use atomics");
// Aggregate the "stripe" in the count data associated with hr.
uint hrs_index = hr->hrs_index();
uint hrm_index = hr->hrm_index();
size_t marked_bytes = 0;
for (uint i = 0; i < _max_worker_id; i += 1) {
......@@ -3263,7 +3249,7 @@ class AggregateCountDataHRClosure: public HeapRegionClosure {
// Fetch the marked_bytes in this region for task i and
// add it to the running total for this region.
marked_bytes += marked_bytes_array[hrs_index];
marked_bytes += marked_bytes_array[hrm_index];
// Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx)
// into the global card bitmap.
......@@ -3497,17 +3483,6 @@ bool ConcurrentMark::do_yield_check(uint worker_id) {
}
}
bool ConcurrentMark::containing_card_is_marked(void* p) {
size_t offset = pointer_delta(p, _g1h->reserved_region().start(), 1);
return _card_bm.at(offset >> CardTableModRefBS::card_shift);
}
bool ConcurrentMark::containing_cards_are_marked(void* start,
void* last) {
return containing_card_is_marked(start) &&
containing_card_is_marked(last);
}
#ifndef PRODUCT
// for debugging purposes
void ConcurrentMark::print_finger() {
......@@ -3760,7 +3735,7 @@ void CMTask::regular_clock_call() {
if (_cm->verbose_medium()) {
gclog_or_tty->print_cr("[%u] regular clock, interval = %1.2lfms, "
"scanned = %d%s, refs reached = %d%s",
"scanned = "SIZE_FORMAT"%s, refs reached = "SIZE_FORMAT"%s",
_worker_id, last_interval_ms,
_words_scanned,
(_words_scanned >= _words_scanned_limit) ? " (*)" : "",
......
......@@ -683,7 +683,9 @@ public:
return _task_queues->steal(worker_id, hash_seed, obj);
}
ConcurrentMark(G1CollectedHeap* g1h, G1RegionToSpaceMapper* prev_bitmap_storage, G1RegionToSpaceMapper* next_bitmap_storage);
ConcurrentMark(G1CollectedHeap* g1h,
G1RegionToSpaceMapper* prev_bitmap_storage,
G1RegionToSpaceMapper* next_bitmap_storage);
~ConcurrentMark();
ConcurrentMarkThread* cmThread() { return _cmThread; }
......@@ -712,8 +714,10 @@ public:
// inconsistent) and always passing the size. hr is the region that
// contains the object and it's passed optionally from callers who
// might already have it (no point in recalculating it).
inline void grayRoot(oop obj, size_t word_size,
uint worker_id, HeapRegion* hr = NULL);
inline void grayRoot(oop obj,
size_t word_size,
uint worker_id,
HeapRegion* hr = NULL);
// It iterates over the heap and for each object it comes across it
// will dump the contents of its reference fields, as well as
......@@ -734,7 +738,8 @@ public:
// AND MARKED : indicates that an object is both explicitly and
// implicitly live (it should be one or the other, not both)
void print_reachable(const char* str,
VerifyOption vo, bool all) PRODUCT_RETURN;
VerifyOption vo,
bool all) PRODUCT_RETURN;
// Clear the next marking bitmap (will be called concurrently).
void clearNextBitmap();
......@@ -771,12 +776,11 @@ public:
// this carefully!
inline void markPrev(oop p);
// Clears marks for all objects in the given range, for the prev,
// next, or both bitmaps. NB: the previous bitmap is usually
// Clears marks for all objects in the given range, for the prev or
// next bitmaps. NB: the previous bitmap is usually
// read-only, so use this carefully!
void clearRangePrevBitmap(MemRegion mr);
void clearRangeNextBitmap(MemRegion mr);
void clearRangeBothBitmaps(MemRegion mr);
// Notify data structures that a GC has started.
void note_start_of_gc() {
......@@ -798,21 +802,6 @@ public:
bool verify_thread_buffers,
bool verify_fingers) PRODUCT_RETURN;
bool isMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
assert(addr >= _nextMarkBitMap->startWord() &&
addr < _nextMarkBitMap->endWord(), "in a region");
return _nextMarkBitMap->isMarked(addr);
}
inline bool not_yet_marked(oop p) const;
// XXX Debug code
bool containing_card_is_marked(void* p);
bool containing_cards_are_marked(void* start, void* last);
bool isPrevMarked(oop p) const {
assert(p != NULL && p->is_oop(), "expected an oop");
HeapWord* addr = (HeapWord*)p;
......@@ -898,7 +887,8 @@ public:
// marked_bytes array slot for the given HeapRegion.
// Sets the bits in the given card bitmap that are associated with the
// cards that are spanned by the memory region.
inline void count_region(MemRegion mr, HeapRegion* hr,
inline void count_region(MemRegion mr,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
......@@ -906,56 +896,27 @@ public:
// data structures for the given worker id.
inline void count_region(MemRegion mr, HeapRegion* hr, uint worker_id);
// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void count_region(MemRegion mr, uint worker_id);
// Counts the given object in the given task/worker counting
// data structures.
inline void count_object(oop obj, HeapRegion* hr,
inline void count_object(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void count_object(oop obj, HeapRegion* hr, uint worker_id);
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool par_mark_and_count(oop obj, HeapRegion* hr,
inline bool par_mark_and_count(oop obj,
HeapRegion* hr,
size_t* marked_bytes_array,
BitMap* task_card_bm);
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool par_mark_and_count(oop obj, size_t word_size,
HeapRegion* hr, uint worker_id);
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool par_mark_and_count(oop obj, HeapRegion* hr, uint worker_id);
// Similar to the above routine but we don't know the heap region that
// contains the object to be marked/counted, which this routine looks up.
inline bool par_mark_and_count(oop obj, uint worker_id);
// Similar to the above routine but there are times when we cannot
// safely calculate the size of obj due to races and we, therefore,
// pass the size in as a parameter. It is the caller's responsibility
// to ensure that the size passed in for obj is valid.
inline bool par_mark_and_count(oop obj, size_t word_size, uint worker_id);
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool mark_and_count(oop obj, HeapRegion* hr);
// Similar to the above routine but we don't know the heap region that
// contains the object to be marked/counted, which this routine looks up.
// Should *not* be called from parallel code.
inline bool mark_and_count(oop obj);
inline bool par_mark_and_count(oop obj,
size_t word_size,
HeapRegion* hr,
uint worker_id);
// Returns true if initialization was successfully completed.
bool completed_initialization() const {
......@@ -1227,9 +1188,12 @@ public:
_finger = new_finger;
}
CMTask(uint worker_id, ConcurrentMark *cm,
size_t* marked_bytes, BitMap* card_bm,
CMTaskQueue* task_queue, CMTaskQueueSet* task_queues);
CMTask(uint worker_id,
ConcurrentMark *cm,
size_t* marked_bytes,
BitMap* card_bm,
CMTaskQueue* task_queue,
CMTaskQueueSet* task_queues);
// it prints statistics associated with this task
void print_stats();
......
......@@ -86,7 +86,7 @@ inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
HeapWord* start = mr.start();
HeapWord* end = mr.end();
size_t region_size_bytes = mr.byte_size();
uint index = hr->hrs_index();
uint index = hr->hrm_index();
assert(!hr->continuesHumongous(), "should not be HC region");
assert(hr == g1h->heap_region_containing(start), "sanity");
......@@ -125,14 +125,6 @@ inline void ConcurrentMark::count_region(MemRegion mr,
count_region(mr, hr, marked_bytes_array, task_card_bm);
}
// Counts the given memory region, which may be a single object, in the
// task/worker counting data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
HeapWord* addr = mr.start();
HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
count_region(mr, hr, worker_id);
}
// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
HeapRegion* hr,
......@@ -142,17 +134,6 @@ inline void ConcurrentMark::count_object(oop obj,
count_region(mr, hr, marked_bytes_array, task_card_bm);
}
// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void ConcurrentMark::count_object(oop obj,
HeapRegion* hr,
uint worker_id) {
size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
BitMap* task_card_bm = count_card_bitmap_for(worker_id);
HeapWord* addr = (HeapWord*) obj;
count_object(obj, hr, marked_bytes_array, task_card_bm);
}
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
......@@ -184,63 +165,6 @@ inline bool ConcurrentMark::par_mark_and_count(oop obj,
return false;
}
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
HeapRegion* hr,
uint worker_id) {
HeapWord* addr = (HeapWord*)obj;
if (_nextMarkBitMap->parMark(addr)) {
// Update the task specific count data for the object.
count_object(obj, hr, worker_id);
return true;
}
return false;
}
// As above - but we don't know the heap region containing the
// object, so this routine looks it up.
inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
HeapWord* addr = (HeapWord*)obj;
HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
return par_mark_and_count(obj, hr, worker_id);
}
// Similar to the above routine but we already know the size, in words, of
// the object that we wish to mark/count
inline bool ConcurrentMark::par_mark_and_count(oop obj,
size_t word_size,
uint worker_id) {
HeapWord* addr = (HeapWord*)obj;
if (_nextMarkBitMap->parMark(addr)) {
// Update the task specific count data for the object.
MemRegion mr(addr, word_size);
count_region(mr, worker_id);
return true;
}
return false;
}
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
HeapWord* addr = (HeapWord*)obj;
_nextMarkBitMap->mark(addr);
// Update the task specific count data for the object.
count_object(obj, hr, 0 /* worker_id */);
return true;
}
// As above - but we don't have the heap region containing the
// object at hand, so this routine looks it up.
inline bool ConcurrentMark::mark_and_count(oop obj) {
HeapWord* addr = (HeapWord*)obj;
HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
return mark_and_count(obj, hr);
}
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
HeapWord* start_addr = MAX2(startWord(), mr.start());
HeapWord* end_addr = MIN2(endWord(), mr.end());
......
......@@ -129,8 +129,7 @@ HeapWord* G1AllocRegion::new_alloc_region_and_allocate(size_t word_size,
// Note that we first perform the allocation and then we store the
// region in _alloc_region. This is the reason why an active region
// can never be empty.
_alloc_region = new_alloc_region;
_count += 1;
update_alloc_region(new_alloc_region);
trace("region allocation successful");
return result;
} else {
......@@ -172,6 +171,19 @@ void G1AllocRegion::set(HeapRegion* alloc_region) {
trace("set");
}
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
trace("update");
// We explicitly check that the region is not empty to make sure we
// maintain the "the alloc region cannot be empty" invariant.
assert(alloc_region != NULL && !alloc_region->is_empty(),
ar_ext_msg(this, "pre-condition"));
_alloc_region = alloc_region;
_alloc_region->set_allocation_context(allocation_context());
_count += 1;
trace("updated");
}
HeapRegion* G1AllocRegion::release() {
trace("releasing");
HeapRegion* alloc_region = _alloc_region;
......@@ -225,5 +237,70 @@ void G1AllocRegion::trace(const char* str, size_t word_size, HeapWord* result) {
G1AllocRegion::G1AllocRegion(const char* name,
bool bot_updates)
: _name(name), _bot_updates(bot_updates),
_alloc_region(NULL), _count(0), _used_bytes_before(0) { }
_alloc_region(NULL), _count(0), _used_bytes_before(0),
_allocation_context(AllocationContext::system()) { }
HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
bool force) {
return _g1h->new_mutator_alloc_region(word_size, force);
}
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForSurvived);
}
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForSurvived);
}
HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
bool force) {
assert(!force, "not supported for GC alloc regions");
return _g1h->new_gc_alloc_region(word_size, count(), GCAllocForTenured);
}
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes,
GCAllocForTenured);
}
HeapRegion* OldGCAllocRegion::release() {
HeapRegion* cur = get();
if (cur != NULL) {
// Determine how far we are from the next card boundary. If it is smaller than
// the minimum object size we can allocate into, expand into the next card.
HeapWord* top = cur->top();
HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);
size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);
if (to_allocate_words != 0) {
// We are not at a card boundary. Fill up, possibly into the next card,
// taking the end of the region and the minimum object size into account.
to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));
// Skip allocation if there is not enough space to allocate even the smallest
// possible object. In this case this region will not be retained, so the
// original problem cannot occur.
if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
CollectedHeap::fill_with_object(dummy, to_allocate_words);
}
}
}
return G1AllocRegion::release();
}
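To make the fill-size computation above concrete, here is a worked, self-contained sketch of the same arithmetic. The constants (512-byte BOT cards, 8-byte heap words, a 2-word minimum fill object) are assumptions for illustration only; the real values come from G1BlockOffsetSharedArray::N_bytes, HeapWordSize and G1CollectedHeap::min_fill_size().

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_bytes     = 512;  // assumed G1BlockOffsetSharedArray::N_bytes
  const uintptr_t word_bytes     = 8;    // assumed HeapWordSize (64-bit)
  const uintptr_t min_fill_words = 2;    // assumed G1CollectedHeap::min_fill_size()

  uintptr_t top = 0x1000 + 3 * word_bytes;   // region top, 24 bytes past a card
  uintptr_t end = 0x1000 + 4 * card_bytes;   // region end

  // align_ptr_up(top, card_bytes): round top up to the next card boundary.
  uintptr_t aligned_top = (top + card_bytes - 1) / card_bytes * card_bytes;

  uintptr_t to_allocate_words = (aligned_top - top) / word_bytes;  // 61 words here
  if (to_allocate_words != 0) {
    // Grow to at least a minimum-size object, but never past the region end.
    to_allocate_words = std::min((end - top) / word_bytes,
                                 std::max(to_allocate_words, min_fill_words));
    if (to_allocate_words >= min_fill_words) {
      std::printf("fill %lu words up to the next card boundary\n",
                  (unsigned long) to_allocate_words);
    }
  }
  return 0;
}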
......@@ -57,6 +57,9 @@ private:
// correct use of init() and release()).
HeapRegion* volatile _alloc_region;
// Allocation context associated with this alloc region.
AllocationContext_t _allocation_context;
// It keeps track of the distinct number of regions that are used
// for allocation in the active interval of this object, i.e.,
// between a call to init() and a call to release(). The count
......@@ -110,6 +113,10 @@ private:
// else can allocate out of it.
void retire(bool fill_up);
// After a region is allocated by alloc_new_region, this
// method is used to set it as the active alloc_region
void update_alloc_region(HeapRegion* alloc_region);
// Allocate a new active region and use it to perform a word_size
// allocation. The force parameter will be passed on to
// G1CollectedHeap::allocate_new_alloc_region() and tells it to try
......@@ -137,6 +144,9 @@ public:
return (hr == _dummy_region) ? NULL : hr;
}
void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
AllocationContext_t allocation_context() { return _allocation_context; }
uint count() { return _count; }
// The following two are the building blocks for the allocation method.
......@@ -182,6 +192,40 @@ public:
#endif // G1_ALLOC_REGION_TRACING
};
class MutatorAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
MutatorAllocRegion()
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
SurvivorGCAllocRegion()
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
class OldGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
OldGCAllocRegion()
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
// This specialization of release() makes sure that the last card that has
// been allocated into has been completely filled by a dummy object. This
// avoids races when remembered set scanning wants to update the BOT of the
// last card in the retained old gc alloc region, and allocation threads
// allocating into that card at the same time.
virtual HeapRegion* release();
};
class ar_ext_msg : public err_msg {
public:
ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("%s", "") {
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
#include "memory/allocation.hpp"
typedef unsigned char AllocationContext_t;
class AllocationContext : AllStatic {
public:
// Currently used context
static AllocationContext_t current() {
return 0;
}
// System wide default context
static AllocationContext_t system() {
return 0;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
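As a usage note: in this stock implementation both current() and system() return 0, so every allocation effectively happens in one global context; the type exists so that a port can partition allocations without changing call sites. A minimal sketch of the intended pass-through pattern (the Allocator facade is hypothetical, not part of this changeset):

#include <cstddef>
#include <cstdio>

typedef unsigned char AllocationContext_t;

struct AllocationContext {
  static AllocationContext_t current() { return 0; }  // stock build: one context
  static AllocationContext_t system()  { return 0; }
};

// Hypothetical allocator facade keyed by context; the single-context
// build simply ignores the argument.
struct Allocator {
  char* allocate(std::size_t bytes, AllocationContext_t context) {
    (void) context;
    return new char[bytes];
  }
};

int main() {
  Allocator a;
  AllocationContext_t ctx = AllocationContext::current();  // capture once
  char* p = a.allocate(32, ctx);                           // thread it through
  std::printf("allocated 32 bytes in context %u\n", (unsigned) ctx);
  delete[] p;
  return 0;
}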
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
void G1DefaultAllocator::init_mutator_alloc_region() {
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
_mutator_alloc_region.init();
}
void G1DefaultAllocator::release_mutator_alloc_region() {
_mutator_alloc_region.release();
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
// b) it's already full (no point in using it),
// c) it's empty (this means that it was emptied during
// a cleanup and it should be on the free list now), or
// d) it's humongous (this means that it was emptied
// during a cleanup and was added to the free list, but
// has been subsequently used to allocate a humongous
// object that may be less than the region size).
if (retained_region != NULL &&
!retained_region->in_collection_set() &&
!(retained_region->top() == retained_region->end()) &&
!retained_region->is_empty() &&
!retained_region->isHumongous()) {
retained_region->record_top_and_timestamp();
// The retained region was added to the old region set when it was
// retired. We have to remove it now, since we don't allow regions
// we allocate to in the region sets. We'll re-add it later, when
// it's retired again.
_g1h->_old_set.remove(retained_region);
bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->_hr_printer.reuse(retained_region);
evacuation_info.set_alloc_regions_used_before(retained_region->used());
}
}
void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info) {
assert_at_safepoint(true /* should_be_vm_thread */);
_survivor_gc_alloc_region.init();
_old_gc_alloc_region.init();
reuse_retained_old_region(evacuation_info,
&_old_gc_alloc_region,
&_retained_old_gc_alloc_region);
}
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
AllocationContext_t context = AllocationContext::current();
evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
old_gc_alloc_region(context)->count());
survivor_gc_alloc_region(context)->release();
// If we have an old GC alloc region to release, we'll save it in
// _retained_old_gc_alloc_region. If we don't
// _retained_old_gc_alloc_region will become NULL. This is what we
// want either way so no reason to check explicitly for either
// condition.
_retained_old_gc_alloc_region = old_gc_alloc_region(context)->release();
if (ResizePLAB) {
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
_g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
}
}
void G1DefaultAllocator::abandon_gc_alloc_regions() {
assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
_retained_old_gc_alloc_region = NULL;
}
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
ParGCAllocBuffer(gclab_word_size), _retired(true) { }
HeapWord* G1ParGCAllocator::allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose, context);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size, context);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz, context);
}
return obj;
}
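The refill decision in allocate_slow() above hinges on the check word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct: only requests that are small relative to the allowed waste justify retiring the current PLAB and fetching a fresh one; larger requests go directly to the heap. A worked sketch with assumed defaults (4096-word PLAB, 10% waste budget; real values come from desired_plab_sz() and the flag):

#include <cstddef>
#include <cstdio>

int main() {
  const unsigned long gclab_word_size = 4096;  // assumed PLAB size in words
  const unsigned long waste_pct       = 10;    // assumed ParallelGCBufferWastePct

  const unsigned long sizes[] = { 64, 409, 410, 2048 };
  for (std::size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i) {
    unsigned long word_sz = sizes[i];
    // Same predicate as allocate_slow(): refill only if the request is
    // smaller than the waste we are willing to leave in the old buffer.
    bool refill = word_sz * 100 < gclab_word_size * waste_pct;
    std::printf("request of %4lu words -> %s\n", word_sz,
                refill ? "retire PLAB and refill" : "allocate directly in heap");
  }
  return 0;
}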
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)) {
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
}
}
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
friend class VMStructs;
protected:
G1CollectedHeap* _g1h;
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
size_t _summary_bytes_used;
public:
G1Allocator(G1CollectedHeap* heap) :
_g1h(heap), _summary_bytes_used(0) { }
static G1Allocator* create_allocator(G1CollectedHeap* g1h);
virtual void init_mutator_alloc_region() = 0;
virtual void release_mutator_alloc_region() = 0;
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info) = 0;
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) = 0;
virtual void abandon_gc_alloc_regions() = 0;
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) = 0;
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) = 0;
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) = 0;
virtual size_t used() = 0;
virtual bool is_retained_old_region(HeapRegion* hr) = 0;
void reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained);
size_t used_unlocked() const {
return _summary_bytes_used;
}
void increase_used(size_t bytes) {
_summary_bytes_used += bytes;
}
void decrease_used(size_t bytes) {
assert(_summary_bytes_used >= bytes,
err_msg("invariant: _summary_bytes_used: "SIZE_FORMAT" should be >= bytes: "SIZE_FORMAT,
_summary_bytes_used, bytes));
_summary_bytes_used -= bytes;
}
void set_used(size_t bytes) {
_summary_bytes_used = bytes;
}
};
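The _summary_bytes_used bookkeeping above follows a simple contract: the counter covers only regions that have been retired, and used() (see G1DefaultAllocator below) adds the live mutator region on top, reading the region pointer exactly once because it can be cleared concurrently. A self-contained sketch of that contract (hypothetical names):

#include <cstddef>
#include <cstdio>

struct Region { std::size_t used_bytes; };

// Hypothetical allocator mirroring the contract: summary_bytes_used
// covers retired regions only; used() adds the live mutator region.
struct Allocator {
  std::size_t summary_bytes_used;
  Region* mutator_region;
  Allocator() : summary_bytes_used(0), mutator_region(0) {}

  void retire_mutator_region(Region* r) {
    summary_bytes_used += r->used_bytes;  // now counted in the summary
    mutator_region = 0;
  }
  std::size_t used() const {
    std::size_t result = summary_bytes_used;
    Region* hr = mutator_region;          // read once; may be cleared concurrently
    if (hr != 0) result += hr->used_bytes;
    return result;
  }
};

int main() {
  Allocator a;
  Region r1 = { 1024 };
  a.mutator_region = &r1;
  std::printf("used while allocating: %lu\n", (unsigned long) a.used());  // 1024
  a.retire_mutator_region(&r1);
  std::printf("used after retirement: %lu\n", (unsigned long) a.used()); // still 1024
  return 0;
}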
// The default allocator for G1.
class G1DefaultAllocator : public G1Allocator {
protected:
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// survivor objects.
SurvivorGCAllocRegion _survivor_gc_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
HeapRegion* _retained_old_gc_alloc_region;
public:
G1DefaultAllocator(G1CollectedHeap* heap) : G1Allocator(heap), _retained_old_gc_alloc_region(NULL) { }
virtual void init_mutator_alloc_region();
virtual void release_mutator_alloc_region();
virtual void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
virtual void abandon_gc_alloc_regions();
virtual bool is_retained_old_region(HeapRegion* hr) {
return _retained_old_gc_alloc_region == hr;
}
virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
return &_mutator_alloc_region;
}
virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
return &_survivor_gc_alloc_region;
}
virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
return &_old_gc_alloc_region;
}
virtual size_t used() {
assert(Heap_lock->owner() != NULL,
"Should be owned on this thread's behalf.");
size_t result = _summary_bytes_used;
// Read only once in case it is set to NULL concurrently
HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
if (hr != NULL) {
result += hr->used();
}
return result;
}
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
public:
G1ParGCAllocBuffer(size_t gclab_word_size);
virtual ~G1ParGCAllocBuffer() {
guarantee(_retired, "Allocation buffer has not been retired");
}
virtual void set_buf(HeapWord* buf) {
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
virtual void retire(bool end_of_gc, bool retain) {
if (_retired) {
return;
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
};
class G1ParGCAllocator : public CHeapObj<mtGC> {
friend class G1ParScanThreadState;
protected:
G1CollectedHeap* _g1h;
size_t _alloc_buffer_waste;
size_t _undo_waste;
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context);
virtual void retire_alloc_buffers() = 0;
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) = 0;
public:
G1ParGCAllocator(G1CollectedHeap* g1h) :
_g1h(g1h), _alloc_buffer_waste(0), _undo_waste(0) {
}
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
size_t undo_waste() { return _undo_waste; }
HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz, AllocationContext_t context) {
HeapWord* obj = NULL;
if (purpose == GCAllocForSurvived) {
obj = alloc_buffer(purpose, context)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
} else {
obj = alloc_buffer(purpose, context)->allocate(word_sz);
}
if (obj != NULL) {
return obj;
}
return allocate_slow(purpose, word_sz, context);
}
void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz, AllocationContext_t context) {
if (alloc_buffer(purpose, context)->contains(obj)) {
assert(alloc_buffer(purpose, context)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose, context)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
}
}
};
class G1DefaultParGCAllocator : public G1ParGCAllocator {
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
virtual G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose, AllocationContext_t context) {
return _alloc_buffers[purpose];
}
virtual void retire_alloc_buffers();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATOR_HPP
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
G1Allocator* G1Allocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultAllocator(g1h);
}
G1ParGCAllocator* G1ParGCAllocator::create_allocator(G1CollectedHeap* g1h) {
return new G1DefaultParGCAllocator(g1h);
}
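These two factory hooks are the seam that lets a port substitute its own allocator classes without touching G1CollectedHeap; the stock implementations simply return the defaults. A minimal sketch of the pattern (PartitionedAllocator and the selector flag are hypothetical, purely for illustration):

#include <cstdio>

struct Heap;  // stand-in for G1CollectedHeap

struct Allocator {
  virtual ~Allocator() {}
  virtual const char* name() const = 0;
  static Allocator* create_allocator(Heap* heap, bool use_partitioned);
};

struct DefaultAllocator : Allocator {
  explicit DefaultAllocator(Heap*) {}
  virtual const char* name() const { return "default"; }
};

struct PartitionedAllocator : Allocator {  // hypothetical alternative
  explicit PartitionedAllocator(Heap*) {}
  virtual const char* name() const { return "partitioned"; }
};

// The factory is the only place that knows about concrete subclasses.
Allocator* Allocator::create_allocator(Heap* heap, bool use_partitioned) {
  if (use_partitioned) {
    return new PartitionedAllocator(heap);
  }
  return new DefaultAllocator(heap);
}

int main() {
  Allocator* a = Allocator::create_allocator(0, false);
  std::printf("selected %s allocator\n", a->name());
  delete a;
  return 0;
}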
......@@ -25,6 +25,8 @@
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/evacuationInfo.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
......@@ -33,7 +35,7 @@
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionSeq.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/shared/hSpaceCounters.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
......@@ -80,12 +82,6 @@ typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
enum GCAllocPurpose {
GCAllocForTenured,
GCAllocForSurvived,
GCAllocPurposeCount
};
class YoungList : public CHeapObj<mtGC> {
private:
G1CollectedHeap* _g1h;
......@@ -158,40 +154,6 @@ public:
void print();
};
class MutatorAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
MutatorAllocRegion()
: G1AllocRegion("Mutator Alloc Region", false /* bot_updates */) { }
};
class SurvivorGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
SurvivorGCAllocRegion()
: G1AllocRegion("Survivor GC Alloc Region", false /* bot_updates */) { }
};
class OldGCAllocRegion : public G1AllocRegion {
protected:
virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
public:
OldGCAllocRegion()
: G1AllocRegion("Old GC Alloc Region", true /* bot_updates */) { }
// This specialization of release() makes sure that the last card that has been
// allocated into has been completely filled by a dummy object.
// This avoids races when remembered set scanning wants to update the BOT of the
// last card in the retained old gc alloc region, and allocation threads
// allocating into that card at the same time.
virtual HeapRegion* release();
};
// The G1 STW is alive closure.
// An instance is embedded into the G1CH and used as the
// (optional) _is_alive_non_header closure in the STW
......@@ -222,6 +184,9 @@ class G1CollectedHeap : public SharedHeap {
friend class MutatorAllocRegion;
friend class SurvivorGCAllocRegion;
friend class OldGCAllocRegion;
friend class G1Allocator;
friend class G1DefaultAllocator;
friend class G1ResManAllocator;
// Closures used in implementation.
template <G1Barrier barrier, G1Mark do_mark_object>
......@@ -232,6 +197,8 @@ class G1CollectedHeap : public SharedHeap {
friend class G1ParScanClosureSuper;
friend class G1ParEvacuateFollowersClosure;
friend class G1ParTask;
friend class G1ParGCAllocator;
friend class G1DefaultParGCAllocator;
friend class G1FreeGarbageRegionClosure;
friend class RefineCardTableEntryClosure;
friend class G1PrepareCompactClosure;
......@@ -291,46 +258,17 @@ private:
G1RegionMappingChangedListener _listener;
// The sequence of all heap regions in the heap.
HeapRegionSeq _hrs;
HeapRegionManager _hrm;
// Alloc region used to satisfy mutator allocation requests.
MutatorAllocRegion _mutator_alloc_region;
// Alloc region used to satisfy allocation requests by the GC for
// survivor objects.
SurvivorGCAllocRegion _survivor_gc_alloc_region;
// Class that handles the different kinds of allocations.
G1Allocator* _allocator;
// PLAB sizing policy for survivors.
PLABStats _survivor_plab_stats;
// Alloc region used to satisfy allocation requests by the GC for
// old objects.
OldGCAllocRegion _old_gc_alloc_region;
// PLAB sizing policy for tenured objects.
PLABStats _old_plab_stats;
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
PLABStats* stats = NULL;
switch (purpose) {
case GCAllocForSurvived:
stats = &_survivor_plab_stats;
break;
case GCAllocForTenured:
stats = &_old_plab_stats;
break;
default:
assert(false, "unrecognized GCAllocPurpose");
}
return stats;
}
// The last old region we allocated to during the last GC.
// Typically, it is not full so we should re-use it during the next GC.
HeapRegion* _retained_old_gc_alloc_region;
// It specifies whether we should attempt to expand the heap after a
// region allocation failure. If heap expansion fails we set this to
// false so that we don't re-attempt the heap expansion (it's likely
......@@ -348,9 +286,6 @@ private:
// It initializes the GC alloc regions at the start of a GC.
void init_gc_alloc_regions(EvacuationInfo& evacuation_info);
// Set up the retained old gc alloc region as the current old gc alloc region.
void use_retained_old_gc_alloc_region(EvacuationInfo& evacuation_info);
// It releases the GC alloc regions at the end of a GC.
void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
......@@ -361,13 +296,6 @@ private:
// Helper for monitoring and management support.
G1MonitoringSupport* _g1mm;
// Determines PLAB size for a particular allocation purpose.
size_t desired_plab_sz(GCAllocPurpose purpose);
// Outside of GC pauses, the number of bytes used in all regions other
// than the current allocation region.
size_t _summary_bytes_used;
// Records whether the region at the given index is kept live by roots or
// references from the young generation.
class HumongousIsLiveBiasedMappedArray : public G1BiasedMappedArray<bool> {
......@@ -429,7 +357,7 @@ private:
// If the HR printer is active, dump the state of the regions in the
// heap after a compaction.
void print_hrs_post_compaction();
void print_hrm_post_compaction();
double verify(bool guard, const char* msg);
void verify_before_gc();
......@@ -525,11 +453,12 @@ protected:
// humongous region.
HeapWord* humongous_obj_allocate_initialize_regions(uint first,
uint num_regions,
size_t word_size);
size_t word_size,
AllocationContext_t context);
// Attempt to allocate a humongous object of the given size. Return
// NULL if unsuccessful.
HeapWord* humongous_obj_allocate(size_t word_size);
HeapWord* humongous_obj_allocate(size_t word_size, AllocationContext_t context);
// The following two methods, allocate_new_tlab() and
// mem_allocate(), are the two main entry points from the runtime
......@@ -585,6 +514,7 @@ protected:
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
HeapWord* attempt_allocation_slow(size_t word_size,
AllocationContext_t context,
unsigned int* gc_count_before_ret,
int* gclocker_retry_count_ret);
......@@ -599,7 +529,8 @@ protected:
// specifies whether the mutator alloc region is expected to be NULL
// or not.
HeapWord* attempt_allocation_at_safepoint(size_t word_size,
bool expect_null_mutator_alloc_region);
AllocationContext_t context,
bool expect_null_mutator_alloc_region);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
......@@ -611,7 +542,9 @@ protected:
// allocation region, either by picking one or expanding the
// heap, and then allocate a block of the given size. The block
// may not be humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose, size_t word_size);
HeapWord* par_allocate_during_gc(GCAllocPurpose purpose,
size_t word_size,
AllocationContext_t context);
HeapWord* allocate_during_gc_slow(GCAllocPurpose purpose,
HeapRegion* alloc_region,
......@@ -623,10 +556,12 @@ protected:
void par_allocate_remaining_space(HeapRegion* r);
// Allocation attempt during GC for a survivor object / PLAB.
inline HeapWord* survivor_attempt_allocation(size_t word_size);
inline HeapWord* survivor_attempt_allocation(size_t word_size,
AllocationContext_t context);
// Allocation attempt during GC for an old object / PLAB.
inline HeapWord* old_attempt_allocation(size_t word_size);
inline HeapWord* old_attempt_allocation(size_t word_size,
AllocationContext_t context);
// These methods are the "callbacks" from the G1AllocRegion class.
......@@ -665,13 +600,15 @@ protected:
// Callback from VM_G1CollectForAllocation operation.
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t word_size, bool* succeeded);
HeapWord* satisfy_failed_allocation(size_t word_size,
AllocationContext_t context,
bool* succeeded);
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
// successful, perform the allocation and return the address of the
// allocated block, or else "NULL".
HeapWord* expand_and_allocate(size_t word_size);
HeapWord* expand_and_allocate(size_t word_size, AllocationContext_t context);
// Process any reference objects discovered during
// an incremental evacuation pause.
......@@ -694,6 +631,27 @@ public:
// (Rounds up to a HeapRegion boundary.)
bool expand(size_t expand_bytes);
// Returns the PLAB statistics given a purpose.
PLABStats* stats_for_purpose(GCAllocPurpose purpose) {
PLABStats* stats = NULL;
switch (purpose) {
case GCAllocForSurvived:
stats = &_survivor_plab_stats;
break;
case GCAllocForTenured:
stats = &_old_plab_stats;
break;
default:
assert(false, "unrecognized GCAllocPurpose");
}
return stats;
}
// Determines PLAB size for a particular allocation purpose.
size_t desired_plab_sz(GCAllocPurpose purpose);
// Do anything common to GC's.
virtual void gc_prologue(bool full);
virtual void gc_epilogue(bool full);
......@@ -715,7 +673,7 @@ public:
// We register a region with the fast "in collection set" test. We
// simply set to true the array slot corresponding to this region.
void register_region_with_in_cset_fast_test(HeapRegion* r) {
_in_cset_fast_test.set_in_cset(r->hrs_index());
_in_cset_fast_test.set_in_cset(r->hrm_index());
}
// This is a fast test on whether a reference points into the
......@@ -1171,17 +1129,17 @@ public:
// But G1CollectedHeap doesn't yet support this.
virtual bool is_maximal_no_gc() const {
return _hrs.available() == 0;
return _hrm.available() == 0;
}
// The current number of regions in the heap.
uint num_regions() const { return _hrs.length(); }
uint num_regions() const { return _hrm.length(); }
// The max number of regions in the heap.
uint max_regions() const { return _hrs.max_length(); }
uint max_regions() const { return _hrm.max_length(); }
// The number of regions that are completely free.
uint num_free_regions() const { return _hrs.num_free_regions(); }
uint num_free_regions() const { return _hrm.num_free_regions(); }
// The number of regions that are not completely free.
uint num_used_regions() const { return num_regions() - num_free_regions(); }
......@@ -1233,7 +1191,7 @@ public:
#ifdef ASSERT
bool is_on_master_free_list(HeapRegion* hr) {
return _hrs.is_free(hr);
return _hrm.is_free(hr);
}
#endif // ASSERT
......@@ -1245,7 +1203,7 @@ public:
}
void append_secondary_free_list() {
_hrs.insert_list_into_free_list(&_secondary_free_list);
_hrm.insert_list_into_free_list(&_secondary_free_list);
}
void append_secondary_free_list_if_not_empty_with_lock() {
......@@ -1271,7 +1229,7 @@ public:
// Determine whether the given region is one that we are using as an
// old GC alloc region.
bool is_old_gc_alloc_region(HeapRegion* hr) {
return hr == _retained_old_gc_alloc_region;
return _allocator->is_retained_old_region(hr);
}
// Perform a collection of the heap; intended for use in implementing
......@@ -1356,13 +1314,13 @@ public:
// Return "TRUE" iff the given object address is in the reserved
// region of g1.
bool is_in_g1_reserved(const void* p) const {
return _hrs.reserved().contains(p);
return _hrm.reserved().contains(p);
}
// Returns a MemRegion that corresponds to the space that has been
// reserved for the heap
MemRegion g1_reserved() const {
return _hrs.reserved();
return _hrm.reserved();
}
virtual bool is_in_closed_subset(const void* p) const;
......@@ -1752,28 +1710,4 @@ protected:
size_t _max_heap_capacity;
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
private:
bool _retired;
public:
G1ParGCAllocBuffer(size_t gclab_word_size);
virtual ~G1ParGCAllocBuffer() {
guarantee(_retired, "Allocation buffer has not been retired");
}
virtual void set_buf(HeapWord* buf) {
ParGCAllocBuffer::set_buf(buf);
_retired = false;
}
virtual void retire(bool end_of_gc, bool retain) {
if (_retired) {
return;
}
ParGCAllocBuffer::retire(end_of_gc, retain);
_retired = true;
}
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_HPP
......@@ -30,15 +30,15 @@
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"
// Inline functions for G1CollectedHeap
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
assert(is_in_reserved(addr),
......@@ -48,7 +48,7 @@ inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
}
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
return _hrs.reserved().start() + index * HeapRegion::GrainWords;
return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}
template <class T>
......@@ -57,7 +57,7 @@ inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) con
assert(is_in_g1_reserved((const void*) addr),
err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
return _hrs.addr_to_region((HeapWord*) addr);
return _hrm.addr_to_region((HeapWord*) addr);
}
template <class T>
......@@ -87,7 +87,7 @@ inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
}
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
return r != NULL && r->in_collection_set();
}
......@@ -98,10 +98,12 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
assert(!isHumongous(word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
AllocationContext_t context = AllocationContext::current();
HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
result = attempt_allocation_slow(word_size,
context,
gc_count_before_ret,
gclocker_retry_count_ret);
}
......@@ -112,17 +114,17 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
return result;
}
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
word_size) {
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!isHumongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
false /* bot_updates */);
HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
false /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
false /* bot_updates */);
result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
false /* bot_updates */);
}
if (result != NULL) {
dirty_young_block(result, word_size);
......@@ -130,16 +132,17 @@ inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
return result;
}
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
AllocationContext_t context) {
assert(!isHumongous(word_size),
"we should not be seeing humongous-size allocations in this path");
HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
true /* bot_updates */);
HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
true /* bot_updates */);
if (result == NULL) {
MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
true /* bot_updates */);
result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
true /* bot_updates */);
}
return result;
}
......
......@@ -38,11 +38,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_g1_rem(g1h->g1_rem_set()),
_hash_seed(17), _queue_num(queue_num),
_term_attempts(0),
_surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
_tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
_age_table(false), _scanner(g1h, rp),
_strong_roots_time(0), _term_time(0),
_alloc_buffer_waste(0), _undo_waste(0) {
_strong_roots_time(0), _term_time(0) {
_scanner.set_par_scan_thread_state(this);
// we allocate G1YoungSurvRateNumRegions + 1 entries, since
// we "sacrifice" entry 0 to keep track of surviving bytes for
......@@ -60,14 +57,14 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
_alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
_alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;
_g1_par_allocator = G1ParGCAllocator::create_allocator(_g1h);
_start = os::elapsedTime();
}
G1ParScanThreadState::~G1ParScanThreadState() {
retire_alloc_buffers();
_g1_par_allocator->retire_alloc_buffers();
delete _g1_par_allocator;
FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}
......@@ -90,14 +87,16 @@ G1ParScanThreadState::print_termination_stats(int i,
const double elapsed_ms = elapsed_time() * 1000.0;
const double s_roots_ms = strong_roots_time() * 1000.0;
const double term_ms = term_time() * 1000.0;
const size_t alloc_buffer_waste = _g1_par_allocator->alloc_buffer_waste();
const size_t undo_waste = _g1_par_allocator->undo_waste();
st->print_cr("%3d %9.2f %9.2f %6.2f "
"%9.2f %6.2f " SIZE_FORMAT_W(8) " "
SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
(alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
alloc_buffer_waste() * HeapWordSize / K,
undo_waste() * HeapWordSize / K);
(alloc_buffer_waste + undo_waste) * HeapWordSize / K,
alloc_buffer_waste * HeapWordSize / K,
undo_waste * HeapWordSize / K);
}
#ifdef ASSERT
......@@ -164,12 +163,13 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
: m->age();
GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
word_sz);
HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
AllocationContext_t context = from_region->allocation_context();
HeapWord* obj_ptr = _g1_par_allocator->allocate(alloc_purpose, word_sz, context);
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
if (obj_ptr != NULL) {
undo_allocation(alloc_purpose, obj_ptr, word_sz);
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj_ptr = NULL;
}
}
......@@ -246,66 +246,8 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
obj->oop_iterate_backwards(&_scanner);
}
} else {
undo_allocation(alloc_purpose, obj_ptr, word_sz);
_g1_par_allocator->undo_allocation(alloc_purpose, obj_ptr, word_sz, context);
obj = forward_ptr;
}
return obj;
}
HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
add_to_alloc_buffer_waste(alloc_buf->words_remaining());
alloc_buf->retire(false /* end_of_gc */, false /* retain */);
HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
if (buf == NULL) {
return NULL; // Let caller handle allocation failure.
}
// Otherwise.
alloc_buf->set_word_size(gclab_word_size);
alloc_buf->set_buf(buf);
obj = alloc_buf->allocate(word_sz);
assert(obj != NULL, "buffer was definitely big enough...");
} else {
obj = _g1h->par_allocate_during_gc(purpose, word_sz);
}
return obj;
}
void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
if (alloc_buffer(purpose)->contains(obj)) {
assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
"should contain whole object");
alloc_buffer(purpose)->undo_allocation(obj, word_sz);
} else {
CollectedHeap::fill_with_object(obj, word_sz);
add_to_undo_waste(word_sz);
}
}
HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
HeapWord* obj = NULL;
if (purpose == GCAllocForSurvived) {
obj = alloc_buffer(GCAllocForSurvived)->allocate_aligned(word_sz, SurvivorAlignmentInBytes);
} else {
obj = alloc_buffer(GCAllocForTenured)->allocate(word_sz);
}
if (obj != NULL) {
return obj;
}
return allocate_slow(purpose, word_sz);
}
void G1ParScanThreadState::retire_alloc_buffers() {
for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
size_t waste = _alloc_buffers[ap]->words_remaining();
add_to_alloc_buffer_waste(waste);
_alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
true /* end_of_gc */,
false /* retain */);
}
}
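The four methods removed above (allocate, allocate_slow, undo_allocation, retire_alloc_buffers) are not deleted outright; per the _g1_par_allocator calls introduced earlier in this hunk, they move behind G1ParGCAllocator and gain an AllocationContext_t parameter. A sketch of the relocated fast path, assuming the moved code keeps the removed logic:

// Sketch, assuming the relocated allocate() keeps the removed logic and
// simply threads the allocation context through to the PLAB lookup:
HeapWord* G1ParGCAllocator::allocate(GCAllocPurpose purpose, size_t word_sz,
                                     AllocationContext_t context) {
  G1ParGCAllocBuffer* buf = alloc_buffer(purpose, context); // context-aware lookup (assumed)
  HeapWord* obj = (purpose == GCAllocForSurvived)
      ? buf->allocate_aligned(word_sz, SurvivorAlignmentInBytes)
      : buf->allocate(word_sz);
  if (obj != NULL) {
    return obj;                                    // fast path: bump-pointer within the PLAB
  }
  return allocate_slow(purpose, word_sz, context); // refill the PLAB or allocate directly
}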
......@@ -46,9 +46,8 @@ class G1ParScanThreadState : public StackObj {
G1SATBCardTableModRefBS* _ct_bs;
G1RemSet* _g1_rem;
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[GCAllocPurposeCount];
G1ParGCAllocator* _g1_par_allocator;
ageTable _age_table;
G1ParScanClosure _scanner;
......@@ -78,7 +77,6 @@ class G1ParScanThreadState : public StackObj {
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
void add_to_undo_waste(size_t waste) { _undo_waste += waste; }
DirtyCardQueue& dirty_card_queue() { return _dcq; }
......@@ -104,13 +102,6 @@ class G1ParScanThreadState : public StackObj {
ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
return _alloc_buffers[purpose];
}
size_t alloc_buffer_waste() const { return _alloc_buffer_waste; }
size_t undo_waste() const { return _undo_waste; }
#ifdef ASSERT
bool queue_is_empty() const { return _refs->is_empty(); }
......@@ -126,12 +117,6 @@ class G1ParScanThreadState : public StackObj {
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
private:
inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
......@@ -177,8 +162,6 @@ class G1ParScanThreadState : public StackObj {
}
private:
void retire_alloc_buffers();
#define G1_PARTIAL_ARRAY_MASK 0x2
inline bool has_partial_array_mask(oop* ref) const {
......
......@@ -32,7 +32,7 @@
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.inline.hpp"
......
......@@ -29,7 +29,7 @@
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/iterator.hpp"
......@@ -219,6 +219,7 @@ void HeapRegion::hr_clear(bool par, bool clear_space, bool locked) {
_in_collection_set = false;
set_allocation_context(AllocationContext::system());
set_young_index_in_cset(-1);
uninstall_surv_rate_group();
set_young_type(NotYoung);
......@@ -344,11 +345,11 @@ HeapWord* HeapRegion::next_block_start_careful(HeapWord* addr) {
return low;
}
HeapRegion::HeapRegion(uint hrs_index,
HeapRegion::HeapRegion(uint hrm_index,
G1BlockOffsetSharedArray* sharedOffsetArray,
MemRegion mr) :
MemRegion mr, AllocationContext_t context) :
G1OffsetTableContigSpace(sharedOffsetArray, mr),
_hrs_index(hrs_index),
_hrm_index(hrm_index), _allocation_context(context),
_humongous_type(NotHumongous), _humongous_start_region(NULL),
_in_collection_set(false),
_next_in_special_set(NULL), _orig_end(NULL),
......@@ -715,6 +716,8 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
st->print("AC%4u", allocation_context());
if (isHumongous()) {
if (startsHumongous())
st->print(" HS");
......
......@@ -22,8 +22,8 @@
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
#include "gc_implementation/g1/g1BiasedArray.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
......@@ -64,7 +64,7 @@ class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
// * max_length() returns the maximum number of regions the heap can have.
//
class HeapRegionSeq: public CHeapObj<mtGC> {
class HeapRegionManager: public CHeapObj<mtGC> {
friend class VMStructs;
G1HeapRegionTable _regions;
......@@ -104,7 +104,7 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const;
// Find a contiguous set of empty or uncommitted regions of length num and return
// the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful.
// the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful.
// If only_empty is true, only empty regions are considered.
// Searches from bottom to top of the heap, doing a first-fit.
uint find_contiguous(size_t num, bool only_empty);
......@@ -117,7 +117,7 @@ class HeapRegionSeq: public CHeapObj<mtGC> {
// sequence could be found, otherwise res_idx contains the start index of this range.
uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const;
// Allocate a new HeapRegion for the given index.
HeapRegion* new_heap_region(uint hrs_index);
HeapRegion* new_heap_region(uint hrm_index);
#ifdef ASSERT
public:
bool is_free(HeapRegion* hr) const;
......@@ -125,9 +125,9 @@ public:
// Returns whether the given region is available for allocation.
bool is_available(uint region) const;
public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0),
public:
// Empty constructor, we'll initialize it with the initialize() method.
HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0),
_next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL),
_allocated_heapregions_length(0), _available_map(),
_free_list("Free list", new MasterFreeRegionListMtSafeChecker())
......@@ -167,7 +167,7 @@ public:
if (hr != NULL) {
assert(hr->next() == NULL, "Single region should not have next");
assert(is_available(hr->hrs_index()), "Must be committed");
assert(is_available(hr->hrm_index()), "Must be committed");
}
return hr;
}
......@@ -211,10 +211,10 @@ public:
uint expand_at(uint start, uint num_regions);
// Find a contiguous set of empty regions of length num. Returns the start index of
// that set, or G1_NO_HRS_INDEX.
// that set, or G1_NO_HRM_INDEX.
uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); }
// Find a contiguous set of empty or unavailable regions of length num. Returns the
// start index of that set, or G1_NO_HRS_INDEX.
// start index of that set, or G1_NO_HRM_INDEX.
uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); }
HeapRegion* next_region_in_heap(const HeapRegion* r) const;
......@@ -235,4 +235,4 @@ public:
void verify_optional() PRODUCT_RETURN;
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
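With the rename, the contiguous-region search API returns G1_NO_HRM_INDEX (formerly G1_NO_HRS_INDEX) on failure. A short usage sketch against the renamed class; the call site and fallback order shown here are illustrative assumptions, not code from this commit:

// Illustrative caller of the renamed search API. The fallback from empty
// regions to empty-or-uncommitted regions is an assumed policy.
uint first = _hrm.find_contiguous_only_empty(obj_regions);
if (first == G1_NO_HRM_INDEX) {
  // No committed free run was found; consider expanding into uncommitted regions.
  first = _hrm.find_contiguous_empty_or_unavailable(obj_regions);
}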