Commit 7cd059e8 authored by dholmes

Merge

......@@ -525,3 +525,24 @@ b63d0e8bfc0738bba21ae67779780f59118a95f7 jdk8u40-b07
d96716f6cbba9f000dfb1da39d2b81264f4cdea7 hs25.40-b13
7ff8d51e0d8fc71f3ad31fd15817083341416ca8 jdk8u40-b09
e193bbae24effeaf476f688d8d840787db53d74e hs25.40-b14
a4d44dfb7d30eea54bc172e4429a655454ae0bbf jdk8u25-b00
9a2152fbd929b0d8b2f5c326a5526214ae71731a jdk8u25-b01
d3d5604ea0dea3812e87ba76ac199d0a8be6f49f jdk8u25-b02
27348de6239bb527c37c0bf59e938ed127b619a7 jdk8u25-b03
220eefb3609e250a0bb0ed26236c1213b8000050 jdk8u25-b04
db8383148bc9417dd4c38fa4cea39510f17325f3 jdk8u25-b05
605df8463453628df49351fa63632666f18698cd jdk8u25-b06
520188d4bade17dbe75163d1f635c08168ea560c jdk8u25-b07
f3f50c4f9ea5d3af40cb794b6f3f2a337c8873db jdk8u25-b08
4f209b7a580c78bac255e69f4724c42584c32a7d jdk8u25-b09
19c692f1e4c571a9285e33e7d3d15948769fcbdd jdk8u25-b10
9e2bb00a81910776d5b16c49a3f4c5264ceab522 jdk8u25-b11
2993491d47df8c4b096ea7fa534162bde8b53dcf jdk8u25-b12
ca6d25be853b5c428c6228871316671843264666 jdk8u25-b13
c77d5db189422e2eef0443ee212644e497113b18 jdk8u25-b14
e62c06b887310b5bd23be9b817a9a6f0daf0d0e1 jdk8u25-b15
6467bdd4d22d8b140844dc847c43b9ba7cb0bbd1 jdk8u25-b16
28b50d07f6f8c5a567b6a25e95a423948114a004 jdk8u25-b17
639abc668bfe995dba811dd35411b9ea8a9041cd jdk8u25-b18
1b3abbeee961dee49780c0e4af5337feb918c555 jdk8u40-b10
f10fe402dfb1543723b4b117a7cba3ea3d4159f1 hs25.40-b15
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -82,6 +82,7 @@
_JVM_EnableCompiler
_JVM_Exit
_JVM_FillInStackTrace
_JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
......
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -82,6 +82,7 @@
_JVM_EnableCompiler
_JVM_Exit
_JVM_FillInStackTrace
_JVM_FindClassFromCaller
_JVM_FindClassFromClass
_JVM_FindClassFromClassLoader
_JVM_FindClassFromBootLoader
......
......@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2014
HS_MAJOR_VER=25
HS_MINOR_VER=40
HS_BUILD_NUMBER=15
HS_BUILD_NUMBER=16
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
......
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -84,6 +84,7 @@ SUNWprivate_1.1 {
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
......
#
# Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -84,6 +84,7 @@ SUNWprivate_1.1 {
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
......
#
# Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
......@@ -84,6 +84,7 @@ SUNWprivate_1.1 {
JVM_EnableCompiler;
JVM_Exit;
JVM_FillInStackTrace;
JVM_FindClassFromCaller;
JVM_FindClassFromClass;
JVM_FindClassFromClassLoader;
JVM_FindClassFromBootLoader;
......
......@@ -873,14 +873,19 @@ void VM_Version::get_processor_features() {
if (supports_bmi1()) {
// tzcnt does not require VEX prefix
if (FLAG_IS_DEFAULT(UseCountTrailingZerosInstruction)) {
UseCountTrailingZerosInstruction = true;
if (!UseBMI1Instructions && !FLAG_IS_DEFAULT(UseBMI1Instructions)) {
// Don't use tzcnt if BMI1 is switched off on command line.
UseCountTrailingZerosInstruction = false;
} else {
UseCountTrailingZerosInstruction = true;
}
}
} else if (UseCountTrailingZerosInstruction) {
warning("tzcnt instruction is not available on this CPU");
FLAG_SET_DEFAULT(UseCountTrailingZerosInstruction, false);
}
// BMI instructions use an encoding with VEX prefix.
// BMI instructions (except tzcnt) use an encoding with VEX prefix.
// VEX prefix is generated only when AVX > 0.
if (supports_bmi1() && supports_avx()) {
if (FLAG_IS_DEFAULT(UseBMI1Instructions)) {
......
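The hunk above makes UseCountTrailingZerosInstruction default to true only when BMI1 support was not explicitly switched off on the command line. As a rough illustration (an assumption about the JIT rather than part of this change: HotSpot intrinsifies Integer.numberOfTrailingZeros and Long.numberOfTrailingZeros, which can compile down to tzcnt when the flag is enabled), a minimal program that exercises the intrinsic:

public class TrailingZeros {
    public static void main(String[] args) {
        // 0b1010_0000 has five trailing zero bits
        System.out.println(Long.numberOfTrailingZeros(0b1010_0000L));  // 5
        System.out.println(Integer.numberOfTrailingZeros(0));          // 32 for a zero input
    }
}

Running it with -XX:-UseBMI1Instructions should leave the tzcnt-based path disabled, matching the flag interaction handled above.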
......@@ -2830,6 +2830,11 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
"bootstrap_method_index %u has bad constant type in class file %s",
bootstrap_method_index,
CHECK);
guarantee_property((operand_fill_index + 1 + argument_count) < operands->length(),
"Invalid BootstrapMethods num_bootstrap_methods or num_bootstrap_arguments value in class file %s",
CHECK);
operands->at_put(operand_fill_index++, bootstrap_method_index);
operands->at_put(operand_fill_index++, argument_count);
......@@ -2847,7 +2852,6 @@ void ClassFileParser::parse_classfile_bootstrap_methods_attribute(u4 attribute_b
}
assert(operand_fill_index == operands->length(), "exact fill");
assert(ConstantPool::operand_array_length(operands) == attribute_array_length, "correct decode");
u1* current_end = cfs->current();
guarantee_property(current_end == current_start + attribute_byte_length,
......
......@@ -90,6 +90,7 @@ typedef jzentry* (JNICALL *FindEntry_t)(jzfile *zip, const char *name, jint *siz
typedef jboolean (JNICALL *ReadEntry_t)(jzfile *zip, jzentry *entry, unsigned char *buf, char *namebuf);
typedef jboolean (JNICALL *ReadMappedEntry_t)(jzfile *zip, jzentry *entry, unsigned char **buf, char *namebuf);
typedef jzentry* (JNICALL *GetNextEntry_t)(jzfile *zip, jint n);
typedef jint (JNICALL *Crc32_t)(jint crc, const jbyte *buf, jint len);
static ZipOpen_t ZipOpen = NULL;
static ZipClose_t ZipClose = NULL;
......@@ -98,6 +99,7 @@ static ReadEntry_t ReadEntry = NULL;
static ReadMappedEntry_t ReadMappedEntry = NULL;
static GetNextEntry_t GetNextEntry = NULL;
static canonicalize_fn_t CanonicalizeEntry = NULL;
static Crc32_t Crc32 = NULL;
// Globals
......@@ -810,9 +812,11 @@ void ClassLoader::load_zip_library() {
ReadEntry = CAST_TO_FN_PTR(ReadEntry_t, os::dll_lookup(handle, "ZIP_ReadEntry"));
ReadMappedEntry = CAST_TO_FN_PTR(ReadMappedEntry_t, os::dll_lookup(handle, "ZIP_ReadMappedEntry"));
GetNextEntry = CAST_TO_FN_PTR(GetNextEntry_t, os::dll_lookup(handle, "ZIP_GetNextEntry"));
Crc32 = CAST_TO_FN_PTR(Crc32_t, os::dll_lookup(handle, "ZIP_CRC32"));
// ZIP_Close is not exported on Windows in JDK5.0 so don't abort if ZIP_Close is NULL
if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL || GetNextEntry == NULL) {
if (ZipOpen == NULL || FindEntry == NULL || ReadEntry == NULL ||
GetNextEntry == NULL || Crc32 == NULL) {
vm_exit_during_initialization("Corrupted ZIP library", path);
}
......@@ -822,6 +826,11 @@ void ClassLoader::load_zip_library() {
// This lookup only works on 1.3. Do not check for non-null here
}
int ClassLoader::crc32(int crc, const char* buf, int len) {
assert(Crc32 != NULL, "ZIP_CRC32 is not found");
return (*Crc32)(crc, (const jbyte*)buf, len);
}
// PackageInfo data exists in order to support the java.lang.Package
// class. A Package object provides information about a java package
// (version, vendor, etc.) which originates in the manifest of the jar
......
......@@ -228,6 +228,7 @@ class ClassLoader: AllStatic {
// to avoid confusing the zip library
static bool get_canonical_path(const char* orig, char* out, int len);
public:
static int crc32(int crc, const char* buf, int len);
static bool update_class_path_entry_list(const char *path,
bool check_for_duplicates,
bool throw_exception=true);
......
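ClassLoader::crc32 above simply forwards to the ZIP library's ZIP_CRC32 entry point looked up in load_zip_library; the CDS changes later in this commit use it to checksum each dumped region and the archive header. As a hedged Java-level sketch of the same computation (java.util.zip.CRC32 implements the same CRC-32; the archive file name here is hypothetical):

import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.zip.CRC32;

public class RegionChecksum {
    public static void main(String[] args) throws Exception {
        byte[] region = Files.readAllBytes(Paths.get("classes.jsa"));  // hypothetical archive path
        CRC32 crc = new CRC32();
        crc.update(region, 0, region.length);                          // checksum the whole region
        System.out.printf("crc32 = 0x%08x%n", crc.getValue());
    }
}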
......@@ -148,7 +148,7 @@ int StackMapFrame::is_assignable_to(
VerificationType* from, VerificationType* to, int32_t len, TRAPS) const {
int32_t i = 0;
for (i = 0; i < len; i++) {
if (!to[i].is_assignable_from(from[i], verifier(), THREAD)) {
if (!to[i].is_assignable_from(from[i], verifier(), false, THREAD)) {
break;
}
}
......@@ -245,7 +245,7 @@ VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
}
VerificationType top = _stack[--_stack_size];
bool subtype = type.is_assignable_from(
top, verifier(), CHECK_(VerificationType::bogus_type()));
top, verifier(), false, CHECK_(VerificationType::bogus_type()));
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset, stack_top_ctx(),
......@@ -265,7 +265,7 @@ VerificationType StackMapFrame::get_local(
return VerificationType::bogus_type();
}
bool subtype = type.is_assignable_from(_locals[index],
verifier(), CHECK_(VerificationType::bogus_type()));
verifier(), false, CHECK_(VerificationType::bogus_type()));
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset,
......@@ -288,14 +288,14 @@ void StackMapFrame::get_local_2(
"get long/double overflows locals");
return;
}
bool subtype = type1.is_assignable_from(_locals[index], verifier(), CHECK);
bool subtype = type1.is_assignable_from(_locals[index], verifier(), false, CHECK);
if (!subtype) {
verifier()->verify_error(
ErrorContext::bad_type(_offset,
TypeOrigin::local(index, this), TypeOrigin::implicit(type1)),
"Bad local variable type");
} else {
subtype = type2.is_assignable_from(_locals[index + 1], verifier(), CHECK);
subtype = type2.is_assignable_from(_locals[index + 1], verifier(), false, CHECK);
if (!subtype) {
/* Unreachable? All local store routines convert a split long or double
* into a TOP during the store. So we should never end up seeing an
......
......@@ -234,7 +234,7 @@ class StackMapFrame : public ResourceObj {
if (_stack_size != 0) {
VerificationType top = _stack[_stack_size - 1];
bool subtype = type.is_assignable_from(
top, verifier(), CHECK_(VerificationType::bogus_type()));
top, verifier(), false, CHECK_(VerificationType::bogus_type()));
if (subtype) {
--_stack_size;
return top;
......@@ -249,9 +249,9 @@ class StackMapFrame : public ResourceObj {
assert(type2.is_long() || type2.is_double(), "must be long/double_2");
if (_stack_size >= 2) {
VerificationType top1 = _stack[_stack_size - 1];
bool subtype1 = type1.is_assignable_from(top1, verifier(), CHECK);
bool subtype1 = type1.is_assignable_from(top1, verifier(), false, CHECK);
VerificationType top2 = _stack[_stack_size - 2];
bool subtype2 = type2.is_assignable_from(top2, verifier(), CHECK);
bool subtype2 = type2.is_assignable_from(top2, verifier(), false, CHECK);
if (subtype1 && subtype2) {
_stack_size -= 2;
return;
......
/*
* Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -42,7 +42,8 @@ VerificationType VerificationType::from_tag(u1 tag) {
}
bool VerificationType::is_reference_assignable_from(
const VerificationType& from, ClassVerifier* context, TRAPS) const {
const VerificationType& from, ClassVerifier* context,
bool from_field_is_protected, TRAPS) const {
instanceKlassHandle klass = context->current_class();
if (from.is_null()) {
// null is assignable to any reference
......@@ -62,9 +63,11 @@ bool VerificationType::is_reference_assignable_from(
Handle(THREAD, klass->protection_domain()), true, CHECK_false);
KlassHandle this_class(THREAD, obj);
if (this_class->is_interface()) {
// We treat interfaces as java.lang.Object, including
// java.lang.Cloneable and java.io.Serializable
if (this_class->is_interface() && (!from_field_is_protected ||
from.name() != vmSymbols::java_lang_Object())) {
// If we are not trying to access a protected field or method in
// java.lang.Object then we treat interfaces as java.lang.Object,
// including java.lang.Cloneable and java.io.Serializable.
return true;
} else if (from.is_object()) {
Klass* from_class = SystemDictionary::resolve_or_fail(
......@@ -76,7 +79,8 @@ bool VerificationType::is_reference_assignable_from(
VerificationType comp_this = get_component(context, CHECK_false);
VerificationType comp_from = from.get_component(context, CHECK_false);
if (!comp_this.is_bogus() && !comp_from.is_bogus()) {
return comp_this.is_assignable_from(comp_from, context, CHECK_false);
return comp_this.is_assignable_from(comp_from, context,
from_field_is_protected, CHECK_false);
}
}
return false;
......
......@@ -265,7 +265,8 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
// is assignable to another. Returns true if one can assign 'from' to
// this.
bool is_assignable_from(
const VerificationType& from, ClassVerifier* context, TRAPS) const {
const VerificationType& from, ClassVerifier* context,
bool from_field_is_protected, TRAPS) const {
if (equals(from) || is_bogus()) {
return true;
} else {
......@@ -286,7 +287,9 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
return from.is_integer();
default:
if (is_reference() && from.is_reference()) {
return is_reference_assignable_from(from, context, CHECK_false);
return is_reference_assignable_from(from, context,
from_field_is_protected,
CHECK_false);
} else {
return false;
}
......@@ -308,7 +311,8 @@ class VerificationType VALUE_OBJ_CLASS_SPEC {
private:
bool is_reference_assignable_from(
const VerificationType&, ClassVerifier*, TRAPS) const;
const VerificationType&, ClassVerifier*, bool from_field_is_protected,
TRAPS) const;
};
#endif // SHARE_VM_CLASSFILE_VERIFICATIONTYPE_HPP
......@@ -1734,7 +1734,7 @@ void ClassVerifier::verify_exception_handler_table(u4 code_length, char* code_da
VerificationType throwable =
VerificationType::reference_type(vmSymbols::java_lang_Throwable());
bool is_subclass = throwable.is_assignable_from(
catch_type, this, CHECK_VERIFY(this));
catch_type, this, false, CHECK_VERIFY(this));
if (!is_subclass) {
// 4286534: should throw VerifyError according to recent spec change
verify_error(ErrorContext::bad_type(handler_pc,
......@@ -2189,7 +2189,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
stack_object_type = current_type();
}
is_assignable = target_class_type.is_assignable_from(
stack_object_type, this, CHECK_VERIFY(this));
stack_object_type, this, false, CHECK_VERIFY(this));
if (!is_assignable) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(),
......@@ -2216,7 +2216,7 @@ void ClassVerifier::verify_field_instructions(RawBytecodeStream* bcs,
// It's protected access, check if stack object is assignable to
// current class.
is_assignable = current_type().is_assignable_from(
stack_object_type, this, CHECK_VERIFY(this));
stack_object_type, this, true, CHECK_VERIFY(this));
if (!is_assignable) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(),
......@@ -2489,7 +2489,7 @@ void ClassVerifier::verify_invoke_init(
instanceKlassHandle mh(THREAD, m->method_holder());
if (m->is_protected() && !mh->is_same_class_package(_klass())) {
bool assignable = current_type().is_assignable_from(
objectref_type, this, CHECK_VERIFY(this));
objectref_type, this, true, CHECK_VERIFY(this));
if (!assignable) {
verify_error(ErrorContext::bad_type(bci,
TypeOrigin::cp(new_class_index, objectref_type),
......@@ -2664,11 +2664,11 @@ void ClassVerifier::verify_invoke_instructions(
bool have_imr_indirect = cp->tag_at(index).value() == JVM_CONSTANT_InterfaceMethodref;
if (!current_class()->is_anonymous()) {
subtype = ref_class_type.is_assignable_from(
current_type(), this, CHECK_VERIFY(this));
current_type(), this, false, CHECK_VERIFY(this));
} else {
VerificationType host_klass_type =
VerificationType::reference_type(current_class()->host_klass()->name());
subtype = ref_class_type.is_assignable_from(host_klass_type, this, CHECK_VERIFY(this));
subtype = ref_class_type.is_assignable_from(host_klass_type, this, false, CHECK_VERIFY(this));
// If invokespecial of IMR, need to recheck for same or
// direct interface relative to the host class
......@@ -2712,7 +2712,7 @@ void ClassVerifier::verify_invoke_instructions(
VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
VerificationType hosttype =
VerificationType::reference_type(current_class()->host_klass()->name());
bool subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
bool subtype = hosttype.is_assignable_from(top, this, false, CHECK_VERIFY(this));
if (!subtype) {
verify_error( ErrorContext::bad_type(current_frame->offset(),
current_frame->stack_top_ctx(),
......@@ -2737,7 +2737,7 @@ void ClassVerifier::verify_invoke_instructions(
// It's protected access, check if stack object is
// assignable to current class.
bool is_assignable = current_type().is_assignable_from(
stack_object_type, this, CHECK_VERIFY(this));
stack_object_type, this, true, CHECK_VERIFY(this));
if (!is_assignable) {
if (ref_class_type.name() == vmSymbols::java_lang_Object()
&& stack_object_type.is_array()
......@@ -2920,7 +2920,7 @@ void ClassVerifier::verify_return_value(
"Method expects a return value");
return;
}
bool match = return_type.is_assignable_from(type, this, CHECK_VERIFY(this));
bool match = return_type.is_assignable_from(type, this, false, CHECK_VERIFY(this));
if (!match) {
verify_error(ErrorContext::bad_type(bci,
current_frame->stack_top_ctx(), TypeOrigin::signature(return_type)),
......
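The new from_field_is_protected argument threaded through is_assignable_from means the verifier no longer blanket-treats an interface as java.lang.Object when it is checking a protected field or method access. For background, the protected-access rule being enforced is the usual one, shown here as a hedged source-level sketch (two hypothetical files; package and member names are made up):

// File p1/Base.java
package p1;
public class Base { protected int value; }

// File p2/Sub.java
package p2;
public class Sub extends p1.Base {
    int ok(Sub s) { return s.value; }         // allowed: the receiver type is assignable to Sub
    // int bad(p1.Base b) { return b.value; } // rejected: the receiver is only known to be Base
}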
......@@ -246,6 +246,12 @@ void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle kl
// Ignore overpasses so statics can be found during resolution
Method* result_oop = klass->uncached_lookup_method(name, signature, Klass::skip_overpass);
if (klass->oop_is_array()) {
// Only consider klass and super klass for arrays
result = methodHandle(THREAD, result_oop);
return;
}
// JDK 8, JVMS 5.4.3.4: Interface method resolution should
// ignore static and non-public methods of java.lang.Object,
// like clone, finalize, registerNatives.
......@@ -290,6 +296,11 @@ void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, Klass
result = methodHandle(THREAD, super_klass->uncached_lookup_method(name, signature, Klass::normal));
}
if (klass->oop_is_array()) {
// Only consider klass and super klass for arrays
return;
}
if (result.is_null()) {
Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
if (default_methods != NULL) {
......@@ -546,7 +557,7 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
// 2. lookup method in resolved klass and its super klasses
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, true, false, CHECK);
if (resolved_method.is_null()) { // not found in the class hierarchy
if (resolved_method.is_null() && !resolved_klass->oop_is_array()) { // not found in the class hierarchy
// 3. lookup method in all the interfaces implemented by the resolved klass
lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
......@@ -559,16 +570,16 @@ void LinkResolver::resolve_method(methodHandle& resolved_method, KlassHandle res
CLEAR_PENDING_EXCEPTION;
}
}
}
if (resolved_method.is_null()) {
// 4. method lookup failed
ResourceMark rm(THREAD);
THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(resolved_klass(),
method_name,
method_signature),
nested_exception);
}
if (resolved_method.is_null()) {
// 4. method lookup failed
ResourceMark rm(THREAD);
THROW_MSG_CAUSE(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(resolved_klass(),
method_name,
method_signature),
nested_exception);
}
// 5. access checks, access checking may be turned off when calling from within the VM.
......@@ -634,17 +645,18 @@ void LinkResolver::resolve_interface_method(methodHandle& resolved_method,
// JDK8: also look for static methods
lookup_method_in_klasses(resolved_method, resolved_klass, method_name, method_signature, false, true, CHECK);
if (resolved_method.is_null()) {
if (resolved_method.is_null() && !resolved_klass->oop_is_array()) {
// lookup method in all the super-interfaces
lookup_method_in_interfaces(resolved_method, resolved_klass, method_name, method_signature, CHECK);
if (resolved_method.is_null()) {
// no method found
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(resolved_klass(),
method_name,
method_signature));
}
}
if (resolved_method.is_null()) {
// no method found
ResourceMark rm(THREAD);
THROW_MSG(vmSymbols::java_lang_NoSuchMethodError(),
Method::name_and_sig_as_C_string(resolved_klass(),
method_name,
method_signature));
}
if (check_access) {
......@@ -776,7 +788,7 @@ void LinkResolver::resolve_field(fieldDescriptor& fd, KlassHandle resolved_klass
}
// Resolve instance field
KlassHandle sel_klass(THREAD, InstanceKlass::cast(resolved_klass())->find_field(field, sig, &fd));
KlassHandle sel_klass(THREAD, resolved_klass->find_field(field, sig, &fd));
// check if field exists; i.e., if a klass containing the field def has been selected
if (sel_klass.is_null()) {
ResourceMark rm(THREAD);
......
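With the early returns added above, method and field resolution against an array class only consults the array class itself and its super class (java.lang.Object), never superinterfaces. A small, hedged illustration in plain Java of why that is enough (not part of the patch): the members resolved on an array receiver come from Object, clone() being the typical case:

public class ArrayResolution {
    public static void main(String[] args) {
        int[] src = {1, 2, 3};
        int[] copy = src.clone();  // clone() resolves against the array class and Object
        System.out.println(copy.length);                    // 3
        System.out.println(src.getClass().getSuperclass()); // class java.lang.Object
    }
}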
......@@ -314,7 +314,6 @@ bool FileMapInfo::init_from_file(int fd) {
fail_continue("The shared archive file has the wrong version.");
return false;
}
_file_offset = (long)n;
size_t info_size = _header->_paths_misc_info_size;
_paths_misc_info = NEW_C_HEAP_ARRAY_RETURN_NULL(char, info_size, mtClass);
......@@ -330,6 +329,14 @@ bool FileMapInfo::init_from_file(int fd) {
return false;
}
size_t len = lseek(fd, 0, SEEK_END);
struct FileMapInfo::FileMapHeader::space_info* si =
&_header->_space[MetaspaceShared::mc];
if (si->_file_offset >= len || len - si->_file_offset < si->_used) {
fail_continue("The shared archive file has been truncated.");
return false;
}
_file_offset += (long)n;
return true;
}
......@@ -430,6 +437,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
si->_capacity = capacity;
si->_read_only = read_only;
si->_allow_exec = allow_exec;
si->_crc = ClassLoader::crc32(0, base, (jint)size);
write_bytes_aligned(base, (int)size);
}
......@@ -454,14 +462,15 @@ void FileMapInfo::write_bytes(const void* buffer, int nbytes) {
// Align file position to an allocation unit boundary.
void FileMapInfo::align_file_position() {
long new_file_offset = align_size_up(_file_offset, os::vm_allocation_granularity());
size_t new_file_offset = align_size_up(_file_offset,
os::vm_allocation_granularity());
if (new_file_offset != _file_offset) {
_file_offset = new_file_offset;
if (_file_open) {
// Seek one byte back from the target and write a byte to insure
// that the written file is the correct length.
_file_offset -= 1;
if (lseek(_fd, _file_offset, SEEK_SET) < 0) {
if (lseek(_fd, (long)_file_offset, SEEK_SET) < 0) {
fail_stop("Unable to seek.", NULL);
}
char zero = 0;
......@@ -568,6 +577,19 @@ char* FileMapInfo::map_region(int i) {
return base;
}
bool FileMapInfo::verify_region_checksum(int i) {
if (!VerifySharedSpaces) {
return true;
}
const char* buf = _header->_space[i]._base;
size_t sz = _header->_space[i]._used;
int crc = ClassLoader::crc32(0, buf, (jint)sz);
if (crc != _header->_space[i]._crc) {
fail_continue("Checksum verification failed.");
return false;
}
return true;
}
// Unmap a memory region in the address space.
......@@ -628,15 +650,33 @@ bool FileMapInfo::initialize() {
return true;
}
int FileMapInfo::FileMapHeader::compute_crc() {
char* header = data();
// start computing from the field after _crc
char* buf = (char*)&_crc + sizeof(int);
size_t sz = data_size() - (buf - header);
int crc = ClassLoader::crc32(0, buf, (jint)sz);
return crc;
}
int FileMapInfo::compute_header_crc() {
return _header->compute_crc();
}
bool FileMapInfo::FileMapHeader::validate() {
if (_version != current_version()) {
FileMapInfo::fail_continue("The shared archive file is the wrong version.");
return false;
}
if (_magic != (int)0xf00baba2) {
FileMapInfo::fail_continue("The shared archive file has a bad magic number.");
return false;
}
if (VerifySharedSpaces && compute_crc() != _crc) {
fail_continue("Header checksum verification failed.");
return false;
}
if (_version != current_version()) {
FileMapInfo::fail_continue("The shared archive file is the wrong version.");
return false;
}
char header_version[JVM_IDENT_MAX];
get_header_version(header_version);
if (strncmp(_jvm_ident, header_version, JVM_IDENT_MAX-1) != 0) {
......
......@@ -61,7 +61,7 @@ private:
bool _file_open;
int _fd;
long _file_offset;
size_t _file_offset;
private:
static SharedClassPathEntry* _classpath_entry_table;
......@@ -87,12 +87,14 @@ public:
}
int _magic; // identify file type.
int _crc; // header crc checksum.
int _version; // (from enum, above.)
size_t _alignment; // how shared archive should be aligned
int _obj_alignment; // value of ObjectAlignmentInBytes
struct space_info {
int _file_offset; // sizeof(this) rounded to vm page size
int _crc; // crc checksum of the current space
size_t _file_offset; // sizeof(this) rounded to vm page size
char* _base; // copy-on-write base address
size_t _capacity; // for validity checking
size_t _used; // for setting space top on read
......@@ -135,6 +137,7 @@ public:
virtual bool validate();
virtual void populate(FileMapInfo* info, size_t alignment);
int compute_crc();
};
FileMapHeader * _header;
......@@ -153,6 +156,8 @@ public:
~FileMapInfo();
static int current_version() { return _current_version; }
int compute_header_crc();
void set_header_crc(int crc) { _header->_crc = crc; }
void populate_header(size_t alignment);
bool validate_header();
void invalidate();
......@@ -181,6 +186,7 @@ public:
void write_bytes_aligned(const void* buffer, int count);
char* map_region(int i);
void unmap_region(int i);
bool verify_region_checksum(int i);
void close();
bool is_open() { return _file_open; }
ReservedSpace reserve_shared_memory();
......
......@@ -607,6 +607,7 @@ void VM_PopulateDumpSharedSpace::doit() {
// Pass 2 - write data.
mapinfo->open_for_write();
mapinfo->set_header_crc(mapinfo->compute_header_crc());
mapinfo->write_header();
mapinfo->write_space(MetaspaceShared::ro, _loader_data->ro_metaspace(), true);
mapinfo->write_space(MetaspaceShared::rw, _loader_data->rw_metaspace(), false);
......@@ -936,9 +937,13 @@ bool MetaspaceShared::map_shared_spaces(FileMapInfo* mapinfo) {
// Map each shared region
if ((_ro_base = mapinfo->map_region(ro)) != NULL &&
mapinfo->verify_region_checksum(ro) &&
(_rw_base = mapinfo->map_region(rw)) != NULL &&
mapinfo->verify_region_checksum(rw) &&
(_md_base = mapinfo->map_region(md)) != NULL &&
mapinfo->verify_region_checksum(md) &&
(_mc_base = mapinfo->map_region(mc)) != NULL &&
mapinfo->verify_region_checksum(mc) &&
(image_alignment == (size_t)max_alignment()) &&
mapinfo->validate_classpath_entry_table()) {
// Success (no need to do anything)
......
......@@ -64,6 +64,13 @@ oop ArrayKlass::multi_allocate(int rank, jint* sizes, TRAPS) {
return NULL;
}
// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
Klass* ArrayKlass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
// There are no fields in an array klass but look to the super class (Object)
assert(super(), "super klass must be present");
return super()->find_field(name, sig, fd);
}
Method* ArrayKlass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
// There are no methods in an array klass but the super class (Object) has some
assert(super(), "super klass must be present");
......
......@@ -28,6 +28,7 @@
#include "memory/universe.hpp"
#include "oops/klass.hpp"
class fieldDescriptor;
class klassVtable;
// ArrayKlass is the abstract baseclass for all array classes
......@@ -85,6 +86,9 @@ class ArrayKlass: public Klass {
virtual oop multi_allocate(int rank, jint* sizes, TRAPS);
objArrayOop allocate_arrayArray(int n, int length, TRAPS);
// find field according to JVM spec 5.4.3.2, returns the klass in which the field is defined
Klass* find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const;
// Lookup operations
Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
......
......@@ -130,6 +130,15 @@ bool Klass::compute_is_subtype_of(Klass* k) {
return is_subclass_of(k);
}
Klass* Klass::find_field(Symbol* name, Symbol* sig, fieldDescriptor* fd) const {
#ifdef ASSERT
tty->print_cr("Error: find_field called on a klass oop."
" Likely error: reflection method does not correctly"
" wrap return value in a mirror object.");
#endif
ShouldNotReachHere();
return NULL;
}
Method* Klass::uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const {
#ifdef ASSERT
......
......@@ -90,6 +90,7 @@ class ClassLoaderData;
class klassVtable;
class ParCompactionManager;
class KlassSizeStats;
class fieldDescriptor;
class Klass : public Metadata {
friend class VMStructs;
......@@ -441,6 +442,7 @@ protected:
virtual void initialize(TRAPS);
// lookup operation for MethodLookupCache
friend class MethodLookupCache;
virtual Klass* find_field(Symbol* name, Symbol* signature, fieldDescriptor* fd) const;
virtual Method* uncached_lookup_method(Symbol* name, Symbol* signature, MethodLookupMode mode) const;
public:
Method* lookup_method(Symbol* name, Symbol* signature) const {
......
......@@ -799,10 +799,16 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
// each arm of the Phi. If I know something clever about the exceptions
// I'm loading the class from, I can replace the LoadKlass with the
// klass constant for the exception oop.
if( ex_node->is_Phi() ) {
ex_klass_node = new (C) PhiNode( ex_node->in(0), TypeKlassPtr::OBJECT );
for( uint i = 1; i < ex_node->req(); i++ ) {
Node* p = basic_plus_adr( ex_node->in(i), ex_node->in(i), oopDesc::klass_offset_in_bytes() );
if (ex_node->is_Phi()) {
ex_klass_node = new (C) PhiNode(ex_node->in(0), TypeKlassPtr::OBJECT);
for (uint i = 1; i < ex_node->req(); i++) {
Node* ex_in = ex_node->in(i);
if (ex_in == top() || ex_in == NULL) {
// This path was not taken.
ex_klass_node->init_req(i, top());
continue;
}
Node* p = basic_plus_adr(ex_in, ex_in, oopDesc::klass_offset_in_bytes());
Node* k = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeInstPtr::KLASS, TypeKlassPtr::OBJECT) );
ex_klass_node->init_req( i, k );
}
......
......@@ -808,6 +808,7 @@ JVM_ENTRY(jclass, JVM_FindClassFromBootLoader(JNIEnv* env,
return (jclass) JNIHandles::make_local(env, k->java_mirror());
JVM_END
// Not used; JVM_FindClassFromCaller replaces this.
JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
jboolean init, jobject loader,
jboolean throwError))
......@@ -834,6 +835,42 @@ JVM_ENTRY(jclass, JVM_FindClassFromClassLoader(JNIEnv* env, const char* name,
return result;
JVM_END
// Find a class with this name in this loader, using the caller's protection domain.
JVM_ENTRY(jclass, JVM_FindClassFromCaller(JNIEnv* env, const char* name,
jboolean init, jobject loader,
jclass caller))
JVMWrapper2("JVM_FindClassFromCaller %s throws ClassNotFoundException", name);
// Java libraries should ensure that name is never null...
if (name == NULL || (int)strlen(name) > Symbol::max_length()) {
// It's impossible to create this class; the name cannot fit
// into the constant pool.
THROW_MSG_0(vmSymbols::java_lang_ClassNotFoundException(), name);
}
TempNewSymbol h_name = SymbolTable::new_symbol(name, CHECK_NULL);
oop loader_oop = JNIHandles::resolve(loader);
oop from_class = JNIHandles::resolve(caller);
oop protection_domain = NULL;
// If loader is null, shouldn't call ClassLoader.checkPackageAccess; otherwise get
// NPE. Put it in another way, the bootstrap class loader has all permission and
// thus no checkPackageAccess equivalence in the VM class loader.
// The caller is also passed as NULL by the java code if there is no security
// manager to avoid the performance cost of getting the calling class.
if (from_class != NULL && loader_oop != NULL) {
protection_domain = java_lang_Class::as_Klass(from_class)->protection_domain();
}
Handle h_loader(THREAD, loader_oop);
Handle h_prot(THREAD, protection_domain);
jclass result = find_class_from_class_loader(env, h_name, init, h_loader,
h_prot, false, THREAD);
if (TraceClassResolution && result != NULL) {
trace_class_resolution(java_lang_Class::as_Klass(JNIHandles::resolve_non_null(result)));
}
return result;
JVM_END
JVM_ENTRY(jclass, JVM_FindClassFromClass(JNIEnv *env, const char *name,
jboolean init, jclass from))
......@@ -3997,10 +4034,15 @@ void initialize_converter_functions() {
// Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init, Handle loader, Handle protection_domain, jboolean throwError, TRAPS) {
jclass find_class_from_class_loader(JNIEnv* env, Symbol* name, jboolean init,
Handle loader, Handle protection_domain,
jboolean throwError, TRAPS) {
// Security Note:
// The Java level wrapper will perform the necessary security check allowing
// us to pass the NULL as the initiating class loader.
// us to pass the NULL as the initiating class loader. The VM is responsible for
// the checkPackageAccess relative to the initiating class loader via the
// protection_domain. The protection_domain is passed as NULL by the java code
// if there is no security manager in 3-arg Class.forName().
Klass* klass = SystemDictionary::resolve_or_fail(name, loader, protection_domain, throwError != 0, CHECK_NULL);
KlassHandle klass_handle(THREAD, klass);
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -419,6 +419,19 @@ JVM_FindClassFromClassLoader(JNIEnv *env, const char *name, jboolean init,
JNIEXPORT jclass JNICALL
JVM_FindClassFromBootLoader(JNIEnv *env, const char *name);
/*
* Find a class from a given class loader. Throws ClassNotFoundException.
* name: name of class
* init: whether initialization is done
* loader: class loader to look up the class. This may not be the same as the caller's
* class loader.
* caller: initiating class. The initiating class may be null when a security
* manager is not installed.
*/
JNIEXPORT jclass JNICALL
JVM_FindClassFromCaller(JNIEnv *env, const char *name, jboolean init,
jobject loader, jclass caller);
/*
* Find a class from a given class.
*/
......
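JVM_FindClassFromCaller is the entry point used when the class library needs the caller's protection domain for the package-access check; as the jvm.cpp comment above notes, the three-argument Class.forName is the Java-level path that reaches it. A minimal usage sketch (the class being looked up is arbitrary):

public class FindFromCaller {
    public static void main(String[] args) throws Exception {
        ClassLoader loader = FindFromCaller.class.getClassLoader();
        // The caller class is passed down to the VM so that checkPackageAccess can be
        // evaluated against the caller's protection domain when a security manager is set.
        Class<?> c = Class.forName("java.util.ArrayList", /* initialize = */ false, loader);
        System.out.println(c.getName());
    }
}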
......@@ -331,7 +331,36 @@ WB_ENTRY(void, WB_NMTOverflowHashBucket(JNIEnv* env, jobject o, jlong num))
}
WB_END
WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
// Test that we can downgrade NMT levels but not upgrade them.
if (MemTracker::tracking_level() == NMT_off) {
MemTracker::transition_to(NMT_off);
return MemTracker::tracking_level() == NMT_off;
} else {
assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking");
MemTracker::transition_to(NMT_summary);
assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now");
// Can't go to detail once NMT is set to summary.
MemTracker::transition_to(NMT_detail);
assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now");
// Shutdown sets tracking level to minimal.
MemTracker::shutdown();
assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now");
// Once the tracking level is minimal, we cannot increase to summary.
// The code ignores this request instead of asserting because if the malloc site
// table overflows in another thread, it tries to change the code to summary.
MemTracker::transition_to(NMT_summary);
assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
// Really can never go up to detail, verify that the code would never do this.
MemTracker::transition_to(NMT_detail);
assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
return MemTracker::tracking_level() == NMT_minimal;
}
WB_END
#endif // INCLUDE_NMT
static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobject method) {
......@@ -936,6 +965,7 @@ static JNINativeMethod methods[] = {
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTOverflowHashBucket", CC"(J)V", (void*)&WB_NMTOverflowHashBucket},
{CC"NMTIsDetailSupported",CC"()Z", (void*)&WB_NMTIsDetailSupported},
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
#endif // INCLUDE_NMT
{CC"deoptimizeAll", CC"()V", (void*)&WB_DeoptimizeAll },
{CC"deoptimizeMethod", CC"(Ljava/lang/reflect/Executable;Z)I",
......
......@@ -2457,6 +2457,10 @@ bool Arguments::check_vm_args_consistency() {
warning("The VM option CICompilerCountPerCPU overrides CICompilerCount.");
}
#ifdef COMPILER1
status &= verify_interval(SafepointPollOffset, 0, os::vm_page_size() - BytesPerWord, "SafepointPollOffset");
#endif
return status;
}
......@@ -3710,6 +3714,11 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
return JNI_ENOMEM;
}
// Set up VerifySharedSpaces
if (FLAG_IS_DEFAULT(VerifySharedSpaces) && SharedArchiveFile != NULL) {
VerifySharedSpaces = true;
}
// Delay warning until here so that we've had a chance to process
// the -XX:-PrintWarnings flag
if (needs_hotspotrc_warning) {
......
......@@ -1154,11 +1154,11 @@ class CommandLineFlags {
"Prevent spurious or premature wakeups from object.wait " \
"(Solaris only)") \
\
product(intx, NativeMonitorTimeout, -1, "(Unstable)") \
experimental(intx, NativeMonitorTimeout, -1, "(Unstable)") \
\
product(intx, NativeMonitorFlags, 0, "(Unstable)") \
experimental(intx, NativeMonitorFlags, 0, "(Unstable)") \
\
product(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \
experimental(intx, NativeMonitorSpinLimit, 20, "(Unstable)") \
\
develop(bool, UsePthreads, false, \
"Use pthread-based instead of libthread-based synchronization " \
......@@ -3775,6 +3775,10 @@ class CommandLineFlags {
product(bool, UseSharedSpaces, true, \
"Use shared spaces for metadata") \
\
product(bool, VerifySharedSpaces, false, \
"Verify shared spaces (false for default archive, true for " \
"archive specified by -XX:SharedArchiveFile)") \
\
product(bool, RequireSharedSpaces, false, \
"Require shared spaces for metadata") \
\
......
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
......@@ -482,7 +482,7 @@ static bool under_host_klass(InstanceKlass* ik, Klass* host_klass) {
ik = InstanceKlass::cast(hc);
// There's no way to make a host class loop short of patching memory.
// Therefore there cannot be a loop here unles there's another bug.
// Therefore there cannot be a loop here unless there's another bug.
// Still, let's check for it.
assert(--inf_loop_check > 0, "no host_klass loop");
}
......@@ -551,7 +551,8 @@ bool Reflection::verify_field_access(Klass* current_class,
if (access.is_protected()) {
if (!protected_restriction) {
// See if current_class (or outermost host class) is a subclass of field_class
if (host_class->is_subclass_of(field_class)) {
// An interface may not access protected members of j.l.Object
if (!host_class->is_interface() && host_class->is_subclass_of(field_class)) {
if (access.is_static() || // static fields are ok, see 6622385
current_class == resolved_class ||
field_class == resolved_class ||
......
......@@ -51,14 +51,6 @@ size_t MallocMemorySnapshot::total_arena() const {
return amount;
}
void MallocMemorySnapshot::reset() {
_tracking_header.reset();
for (int index = 0; index < mt_number_of_types; index ++) {
_malloc[index].reset();
}
}
// Make adjustment by subtracting chunks used by arenas
// from total chunks to get total free chunck size
void MallocMemorySnapshot::make_adjustment() {
......@@ -116,14 +108,9 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) {
bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
assert(from != NMT_off, "Can not transition from off state");
assert(to != NMT_off, "Can not transition to off state");
if (from == NMT_minimal) {
MallocMemorySummary::reset();
}
assert (from != NMT_minimal, "cannot transition from minimal state");
if (to == NMT_detail) {
assert(from == NMT_minimal || from == NMT_summary, "Just check");
return MallocSiteTable::initialize();
} else if (from == NMT_detail) {
if (from == NMT_detail) {
assert(to == NMT_minimal || to == NMT_summary, "Just check");
MallocSiteTable::shutdown();
}
......
......@@ -51,14 +51,6 @@ class MemoryCounter VALUE_OBJ_CLASS_SPEC {
DEBUG_ONLY(_peak_size = 0;)
}
// Reset counters
void reset() {
_size = 0;
_count = 0;
DEBUG_ONLY(_peak_size = 0;)
DEBUG_ONLY(_peak_count = 0;)
}
inline void allocate(size_t sz) {
Atomic::add(1, (volatile MemoryCounterType*)&_count);
if (sz > 0) {
......@@ -124,11 +116,6 @@ class MallocMemory VALUE_OBJ_CLASS_SPEC {
_arena.resize(sz);
}
void reset() {
_malloc.reset();
_arena.reset();
}
inline size_t malloc_size() const { return _malloc.size(); }
inline size_t malloc_count() const { return _malloc.count();}
inline size_t arena_size() const { return _arena.size(); }
......@@ -176,8 +163,6 @@ class MallocMemorySnapshot : public ResourceObj {
return s->by_type(mtThreadStack)->malloc_count();
}
void reset();
void copy_to(MallocMemorySnapshot* s) {
s->_tracking_header = _tracking_header;
for (int index = 0; index < mt_number_of_types; index ++) {
......@@ -240,11 +225,6 @@ class MallocMemorySummary : AllStatic {
return as_snapshot()->malloc_overhead()->size();
}
// Reset all counters to zero
static void reset() {
as_snapshot()->reset();
}
static MallocMemorySnapshot* as_snapshot() {
return (MallocMemorySnapshot*)_snapshot;
}
......
......@@ -90,10 +90,6 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
_class_count(0) {
}
~MemBaseline() {
reset();
}
bool baseline(bool summaryOnly = true);
BaselineType baseline_type() const { return _baseline_type; }
......@@ -169,8 +165,7 @@ class MemBaseline VALUE_OBJ_CLASS_SPEC {
// reset the baseline for reuse
void reset() {
_baseline_type = Not_baselined;
_malloc_memory_snapshot.reset();
_virtual_memory_snapshot.reset();
// _malloc_memory_snapshot and _virtual_memory_snapshot are copied over.
_class_count = 0;
_malloc_sites.clear();
......
......@@ -96,20 +96,6 @@ class MemSummaryReporter : public MemReporterBase {
size_t _class_count;
public:
// Report summary tracking data from global snapshots directly.
// This constructor is used for final reporting and hs_err reporting.
MemSummaryReporter(MallocMemorySnapshot* malloc_snapshot,
VirtualMemorySnapshot* vm_snapshot, outputStream* output,
size_t class_count = 0, size_t scale = K) :
MemReporterBase(output, scale),
_malloc_snapshot(malloc_snapshot),
_vm_snapshot(vm_snapshot) {
if (class_count == 0) {
_class_count = InstanceKlass::number_of_instance_classes();
} else {
_class_count = class_count;
}
}
// This constructor is for normal reporting from a recent baseline.
MemSummaryReporter(MemBaseline& baseline, outputStream* output,
size_t scale = K) : MemReporterBase(output, scale),
......
......@@ -144,11 +144,9 @@ void Tracker::record(address addr, size_t size) {
}
// Shutdown can only be issued via JCmd, and NMT JCmd is serialized
// by lock
// Shutdown can only be issued via JCmd, and NMT JCmd is serialized by lock
void MemTracker::shutdown() {
// We can only shutdown NMT to minimal tracking level if it is
// ever on.
// We can only shutdown NMT to minimal tracking level if it is ever on.
if (tracking_level () > NMT_minimal) {
transition_to(NMT_minimal);
}
......@@ -157,45 +155,36 @@ void MemTracker::shutdown() {
bool MemTracker::transition_to(NMT_TrackingLevel level) {
NMT_TrackingLevel current_level = tracking_level();
assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");
if (current_level == level) {
return true;
} else if (current_level > level) {
// Downgrade tracking level, we want to lower the tracking
// level first
// Downgrade tracking level, we want to lower the tracking level first
_tracking_level = level;
// Make _tracking_level visible immediately.
OrderAccess::fence();
VirtualMemoryTracker::transition(current_level, level);
MallocTracker::transition(current_level, level);
if (level == NMT_minimal) _baseline.reset();
} else {
VirtualMemoryTracker::transition(current_level, level);
MallocTracker::transition(current_level, level);
_tracking_level = level;
// Make _tracking_level visible immediately.
OrderAccess::fence();
// Upgrading tracking level is not supported and has never been supported.
// Allocating and deallocating malloc tracking structures is not thread safe and
// leads to inconsistencies unless a lot coarser locks are added.
}
return true;
}
void MemTracker::final_report(outputStream* output) {
assert(output != NULL, "No output stream");
if (tracking_level() >= NMT_summary) {
MallocMemorySnapshot* malloc_memory_snapshot =
MallocMemorySummary::as_snapshot();
malloc_memory_snapshot->make_adjustment();
VirtualMemorySnapshot* virtual_memory_snapshot =
VirtualMemorySummary::as_snapshot();
MemSummaryReporter rptr(malloc_memory_snapshot,
virtual_memory_snapshot, output);
rptr.report();
// shutdown NMT, the data no longer accurate
shutdown();
void MemTracker::report(bool summary_only, outputStream* output) {
assert(output != NULL, "No output stream");
MemBaseline baseline;
if (baseline.baseline(summary_only)) {
if (summary_only) {
MemSummaryReporter rpt(baseline, output);
rpt.report();
} else {
MemDetailReporter rpt(baseline, output);
rpt.report();
}
}
}
......
......@@ -70,6 +70,7 @@ class MemTracker : AllStatic {
static inline void release_thread_stack(void* addr, size_t size) { }
static void final_report(outputStream*) { }
static void error_report(outputStream*) { }
};
#else
......@@ -270,13 +271,20 @@ class MemTracker : AllStatic {
// other tools.
static inline Mutex* query_lock() { return _query_lock; }
// Make a final report and shutdown.
// This function generates summary report without creating snapshots,
// to avoid additional memory allocation. It uses native memory summary
// counters, and makes adjustment to them, once the adjustment is made,
// the counters are no longer accurate. As the result, this function
// should only be used for final reporting before shutting down.
static void final_report(outputStream*);
// Make a final report or report for hs_err file.
static void error_report(outputStream* output) {
if (tracking_level() >= NMT_summary) {
report(true, output); // just print summary for error case.
}
}
static void final_report(outputStream* output) {
NMT_TrackingLevel level = tracking_level();
if (level >= NMT_summary) {
report(level == NMT_summary, output);
}
}
// Stored baseline
static inline MemBaseline& get_baseline() {
......@@ -291,6 +299,7 @@ class MemTracker : AllStatic {
private:
static NMT_TrackingLevel init_tracking_level();
static void report(bool summary_only, outputStream* output);
private:
// Tracking level
......
......@@ -443,26 +443,28 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
assert(_reserved_regions != NULL, "Sanity check");
ThreadCritical tc;
LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
while (head != NULL) {
const ReservedMemoryRegion* rgn = head->peek();
if (!walker->do_allocation_site(rgn)) {
return false;
// Check that the _reserved_regions haven't been deleted.
if (_reserved_regions != NULL) {
LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
while (head != NULL) {
const ReservedMemoryRegion* rgn = head->peek();
if (!walker->do_allocation_site(rgn)) {
return false;
}
head = head->next();
}
head = head->next();
}
}
return true;
}
// Transition virtual memory tracking level.
bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
if (from == NMT_minimal) {
assert(to == NMT_summary || to == NMT_detail, "Just check");
VirtualMemorySummary::reset();
} else if (to == NMT_minimal) {
assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
if (to == NMT_minimal) {
assert(from == NMT_summary || from == NMT_detail, "Just check");
// Clean up virtual memory tracking data structures.
ThreadCritical tc;
// Check for potential race with other thread calling transition
if (_reserved_regions != NULL) {
delete _reserved_regions;
_reserved_regions = NULL;
......
......@@ -62,11 +62,6 @@ class VirtualMemory VALUE_OBJ_CLASS_SPEC {
_committed -= sz;
}
void reset() {
_reserved = 0;
_committed = 0;
}
inline size_t reserved() const { return _reserved; }
inline size_t committed() const { return _committed; }
};
......@@ -123,12 +118,6 @@ class VirtualMemorySnapshot : public ResourceObj {
return amount;
}
inline void reset() {
for (int index = 0; index < mt_number_of_types; index ++) {
_virtual_memory[index].reset();
}
}
void copy_to(VirtualMemorySnapshot* s) {
for (int index = 0; index < mt_number_of_types; index ++) {
s->_virtual_memory[index] = _virtual_memory[index];
......@@ -174,10 +163,6 @@ class VirtualMemorySummary : AllStatic {
as_snapshot()->copy_to(s);
}
static inline void reset() {
as_snapshot()->reset();
}
static VirtualMemorySnapshot* as_snapshot() {
return (VirtualMemorySnapshot*)_snapshot;
}
......
......@@ -774,7 +774,7 @@ void VMError::report(outputStream* st) {
STEP(228, "(Native Memory Tracking)" )
if (_verbose) {
MemTracker::final_report(st);
MemTracker::error_report(st);
}
STEP(230, "" )
......
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/**
* @test
* @bug 8059299
* @summary assert(adr_type != NULL) failed: expecting TypeKlassPtr
* @run main/othervm -Xbatch CatchInlineExceptions
*/
class Exception1 extends Exception {};
class Exception2 extends Exception {};
public class CatchInlineExceptions {
private static int counter0;
private static int counter1;
private static int counter2;
private static int counter;
static void foo(int i) throws Exception {
if ((i & 1023) == 2) {
counter0++;
throw new Exception2();
}
}
static void test(int i) throws Exception {
try {
foo(i);
}
catch (Exception e) {
if (e instanceof Exception1) {
counter1++;
} else if (e instanceof Exception2) {
counter2++;
}
counter++;
throw e;
}
}
public static void main(String[] args) throws Throwable {
for (int i = 0; i < 15000; i++) {
try {
test(i);
} catch (Exception e) {
// expected
}
}
if (counter1 != 0) {
throw new RuntimeException("Failed: counter1(" + counter1 + ") != 0");
}
if (counter2 != counter0) {
throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter0(" + counter0 + ")");
}
if (counter2 != counter) {
throw new RuntimeException("Failed: counter2(" + counter2 + ") != counter(" + counter + ")");
}
System.out.println("TEST PASSED");
}
}
/*
* Copyright (c) 2013, 2014, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8059100
* @summary Test that you can decrease NMT tracking level but not increase it.
* @key nmt
* @library /testlibrary /testlibrary/whitebox
* @build ChangeTrackingLevel
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* sun.hotspot.WhiteBox$WhiteBoxPermission
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ChangeTrackingLevel
*/
import com.oracle.java.testlibrary.*;
import sun.hotspot.WhiteBox;
public class ChangeTrackingLevel {
public static WhiteBox wb = WhiteBox.getWhiteBox();
public static void main(String args[]) throws Exception {
boolean testChangeLevel = wb.NMTChangeTrackingLevel();
if (testChangeLevel) {
System.out.println("NMT level change test passed.");
} else {
// it also fails if the VM asserts.
throw new RuntimeException("NMT level change test failed");
}
}
};
......@@ -101,6 +101,7 @@ public class WhiteBox {
public native void NMTOverflowHashBucket(long num);
public native long NMTMallocWithPseudoStack(long size, int index);
public native boolean NMTIsDetailSupported();
public native boolean NMTChangeTrackingLevel();
// Compiler
public native void deoptimizeAll();
......