Commit d01f510b authored by J jwilhelm

Merge

@@ -93,6 +93,47 @@ void os::wait_for_keypress_at_exit(void) {
return;
}
+// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
+// so on posix, unmap the section at the start and at the end of the chunk that we mapped
+// rather than unmapping and remapping the whole chunk to get requested alignment.
+char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
+      "Alignment must be a multiple of allocation granularity (page size)");
+  assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
+
+  size_t extra_size = size + alignment;
+  assert(extra_size >= size, "overflow, size is too large to allow alignment");
+
+  char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+  if (extra_base == NULL) {
+    return NULL;
+  }
+
+  // Do manual alignment
+  char* aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+
+  // [  |                                          |  ]
+  // ^ extra_base
+  //    ^ extra_base + begin_offset == aligned_base
+  //    extra_base + begin_offset + size           ^
+  //                       extra_base + extra_size ^
+  // |<>| == begin_offset
+  //                                 end_offset == |<>|
+  size_t begin_offset = aligned_base - extra_base;
+  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
+
+  if (begin_offset > 0) {
+    os::release_memory(extra_base, begin_offset);
+  }
+
+  if (end_offset > 0) {
+    os::release_memory(extra_base + begin_offset + size, end_offset);
+  }
+
+  return aligned_base;
+}
void os::Posix::print_load_average(outputStream* st) {
st->print("load average:");
double loadavg[3];
......
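The trim-both-ends trick above generalizes beyond HotSpot. Here is a minimal standalone sketch of the same idea written directly against mmap/munmap instead of the os:: wrappers; the name reserve_aligned and the reduced error handling are mine, not part of this change.

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Over-reserve by 'alignment' bytes, then unmap the unused head and tail.
// The middle stays mapped the whole time, so no other thread can place a
// MAP_FIXED mapping over the part we keep.
static char* reserve_aligned(size_t size, size_t alignment) {
  assert(((alignment & (alignment - 1)) == 0) && "alignment must be a power of two");
  size_t extra_size = size + alignment;
  char* extra_base = (char*) mmap(NULL, extra_size, PROT_NONE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (extra_base == (char*) MAP_FAILED) return NULL;
  // Round up to the next alignment boundary inside the over-sized mapping.
  char* aligned_base = (char*) (((uintptr_t) extra_base + alignment - 1)
                                & ~((uintptr_t) alignment - 1));
  size_t begin_offset = aligned_base - extra_base;
  size_t end_offset = (extra_base + extra_size) - (aligned_base + size);
  if (begin_offset > 0) munmap(extra_base, begin_offset);
  if (end_offset > 0) munmap(aligned_base + size, end_offset);
  return aligned_base;
}

int main() {
  char* p = reserve_aligned(1 << 20, 1 << 16);  // 1 MiB, 64 KiB-aligned
  printf("reserved at %p\n", (void*) p);
  return p == NULL;
}
```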
@@ -2895,6 +2895,36 @@ void os::pd_split_reserved_memory(char *base, size_t size, size_t split,
}
}
+// Multiple threads can race in this code, but unlike on POSIX-like OSes it is not
+// possible to unmap small sections of the reserved virtual space to get the
+// requested alignment. Windows prevents multiple threads from remapping over each
+// other, so this release-and-retry loop is thread-safe.
+char* os::reserve_memory_aligned(size_t size, size_t alignment) {
+  assert((alignment & (os::vm_allocation_granularity() - 1)) == 0,
+      "Alignment must be a multiple of allocation granularity (page size)");
+  assert((size & (alignment - 1)) == 0, "size must be 'alignment' aligned");
+
+  size_t extra_size = size + alignment;
+  assert(extra_size >= size, "overflow, size is too large to allow alignment");
+
+  char* aligned_base = NULL;
+  do {
+    char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
+    if (extra_base == NULL) {
+      return NULL;
+    }
+    // Do manual alignment
+    aligned_base = (char*) align_size_up((uintptr_t) extra_base, alignment);
+
+    // Release the whole over-sized reservation, then re-reserve at the aligned
+    // address; retry if another thread grabbed the address in between.
+    os::release_memory(extra_base, extra_size);
+    aligned_base = os::reserve_memory(size, aligned_base);
+  } while (aligned_base == NULL);
+
+  return aligned_base;
+}
char* os::pd_reserve_memory(size_t bytes, char* addr, size_t alignment_hint) {
assert((size_t)addr % os::vm_allocation_granularity() == 0,
"reserve alignment");
......
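For comparison, a hedged standalone sketch of the same release-and-retry scheme against the raw Win32 calls; reserve_aligned_win and its minimal error handling are illustrative rather than HotSpot code, and alignment is assumed to be a multiple of the allocation granularity so the aligned address is actually reservable.

```cpp
#include <windows.h>
#include <cstddef>
#include <cstdint>

static char* reserve_aligned_win(size_t size, size_t alignment) {
  char* aligned_base = NULL;
  do {
    // Over-reserve so that an aligned address must exist inside the block.
    char* extra_base = (char*) VirtualAlloc(NULL, size + alignment,
                                            MEM_RESERVE, PAGE_NOACCESS);
    if (extra_base == NULL) return NULL;
    aligned_base = (char*) (((uintptr_t) extra_base + alignment - 1)
                            & ~((uintptr_t) alignment - 1));
    // Windows can only release a reservation as a whole, so release it and
    // immediately try to re-reserve exactly at the aligned address.
    VirtualFree(extra_base, 0, MEM_RELEASE);
    aligned_base = (char*) VirtualAlloc(aligned_base, size,
                                        MEM_RESERVE, PAGE_NOACCESS);
    // If another thread reserved that address in the gap, retry from scratch.
  } while (aligned_base == NULL);
  return aligned_base;
}
```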
@@ -202,7 +202,7 @@ template <MEMFLAGS F> class CHeapObj ALLOCATION_SUPER_CLASS_SPEC {
// Calling new or delete will result in fatal error.
class StackObj ALLOCATION_SUPER_CLASS_SPEC {
- public:
+ private:
  void* operator new(size_t size);
  void operator delete(void* p);
};
@@ -226,7 +226,7 @@ class StackObj ALLOCATION_SUPER_CLASS_SPEC {
// be defined as an empty string "".
//
class _ValueObj {
- public:
+ private:
  void* operator new(size_t size);
  void operator delete(void* p);
};
......
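The effect of moving operator new and operator delete from public: to private: in StackObj and _ValueObj is that accidental heap allocation is now rejected at compile time instead of failing only at link time (the operators were declared but never defined). A minimal sketch with made-up names:

```cpp
#include <cstddef>

class OnStackOnly {
 private:
  void* operator new(std::size_t size);   // declared, intentionally never defined
  void operator delete(void* p);
 public:
  int value;
};

void demo() {
  OnStackOnly ok;                          // fine: automatic (stack) storage
  // OnStackOnly* bad = new OnStackOnly(); // error: 'operator new' is private
  (void) ok;
}
```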
@@ -2192,11 +2192,6 @@ void SpaceManager::mangle_freed_chunks() {
// MetaspaceAux
-size_t MetaspaceAux::used_in_bytes() {
-  return (Metaspace::class_space_list()->used_words_sum() +
-          Metaspace::space_list()->used_words_sum()) * BytesPerWord;
-}
size_t MetaspaceAux::used_in_bytes(Metaspace::MetadataType mdtype) {
size_t used = 0;
ClassLoaderDataGraphMetaspaceIterator iter;
@@ -2222,14 +2217,6 @@ size_t MetaspaceAux::free_in_bytes(Metaspace::MetadataType mdtype) {
return free * BytesPerWord;
}
-// The total words available for metadata allocation. This
-// uses Metaspace capacity_words() which is the total words
-// in chunks allocated for a Metaspace.
-size_t MetaspaceAux::capacity_in_bytes() {
-  return (Metaspace::class_space_list()->capacity_words_sum() +
-          Metaspace::space_list()->capacity_words_sum()) * BytesPerWord;
-}
size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
size_t capacity = free_chunks_total(mdtype);
ClassLoaderDataGraphMetaspaceIterator iter;
@@ -2242,11 +2229,6 @@ size_t MetaspaceAux::capacity_in_bytes(Metaspace::MetadataType mdtype) {
return capacity * BytesPerWord;
}
-size_t MetaspaceAux::reserved_in_bytes() {
-  return (Metaspace::class_space_list()->virtual_space_total() +
-          Metaspace::space_list()->virtual_space_total()) * BytesPerWord;
-}
size_t MetaspaceAux::reserved_in_bytes(Metaspace::MetadataType mdtype) {
size_t reserved = (mdtype == Metaspace::ClassType) ?
Metaspace::class_space_list()->virtual_space_total() :
......
@@ -156,16 +156,25 @@ class MetaspaceAux : AllStatic {
public:
// Total of space allocated to metadata in all Metaspaces
-  static size_t used_in_bytes();
+  static size_t used_in_bytes() {
+    return used_in_bytes(Metaspace::ClassType) +
+           used_in_bytes(Metaspace::NonClassType);
+  }

-  // Total of available space in all Metaspaces
+  // Total of capacity allocated to all Metaspaces. This includes
+  // space in Metachunks not yet allocated and in the Metachunk
+  // freelist.
-  static size_t capacity_in_bytes();
+  static size_t capacity_in_bytes() {
+    return capacity_in_bytes(Metaspace::ClassType) +
+           capacity_in_bytes(Metaspace::NonClassType);
+  }

  // Total space reserved in all Metaspaces
-  static size_t reserved_in_bytes();
+  static size_t reserved_in_bytes() {
+    return reserved_in_bytes(Metaspace::ClassType) +
+           reserved_in_bytes(Metaspace::NonClassType);
+  }
static size_t min_chunk_size();
......
@@ -1839,15 +1839,16 @@ uint Node::match_edge(uint idx) const {
return idx; // True for other than index 0 (control)
}
+static RegMask _not_used_at_all;

// Register classes are defined for specific machines
const RegMask &Node::out_RegMask() const {
  ShouldNotCallThis();
-  return *(new RegMask());
+  return _not_used_at_all;
}

const RegMask &Node::in_RegMask(uint) const {
  ShouldNotCallThis();
-  return *(new RegMask());
+  return _not_used_at_all;
}
//=============================================================================
......
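The node.cpp change replaces a per-call leak with one shared static dummy: out_RegMask/in_RegMask should never be reached (ShouldNotCallThis()), but they must still return a reference, and `return *(new RegMask());` allocated a fresh object on every call in builds where the guard does not abort. A sketch of the pattern with a hypothetical type:

```cpp
#include <cassert>

struct Mask { unsigned bits[4]; };

static Mask _not_used_at_all;   // one shared dummy; callers must never read it

const Mask& never_called() {
  assert(false && "should not be called");  // analogous to ShouldNotCallThis()
  return _not_used_at_all;                  // was: return *(new Mask());  (leaks)
}
```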
@@ -255,6 +255,7 @@ class os: AllStatic {
static int vm_allocation_granularity();
static char* reserve_memory(size_t bytes, char* addr = 0,
size_t alignment_hint = 0);
+  static char* reserve_memory_aligned(size_t size, size_t alignment);
static char* attempt_reserve_memory_at(size_t bytes, char* addr);
static void split_reserved_memory(char *base, size_t size,
size_t split, bool realloc);
......
@@ -3527,11 +3527,12 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
java_lang_Thread::set_thread_status(thread_object,
java_lang_Thread::RUNNABLE);
-  // The VM preresolve methods to these classes. Make sure that get initialized
-  initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
-  initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK_0);
+  // The VM creates & returns objects of this class. Make sure it's initialized.
+  initialize_class(vmSymbols::java_lang_Class(), CHECK_0);
+
+  // The VM preresolves methods to these classes. Make sure that they get initialized
+  initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK_0);
+  initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK_0);
call_initializeSystemClass(CHECK_0);
// get the Java runtime name after java.lang.System is initialized
......
@@ -329,20 +329,9 @@ void ReservedSpace::initialize(size_t size, size_t alignment, bool large,
if ((((size_t)base + noaccess_prefix) & (alignment - 1)) != 0) {
// Base not aligned, retry
if (!os::release_memory(base, size)) fatal("os::release_memory failed");
-      // Reserve size large enough to do manual alignment and
-      // increase size to a multiple of the desired alignment
+      // Make sure that size is aligned
      size = align_size_up(size, alignment);
-      size_t extra_size = size + alignment;
-      do {
-        char* extra_base = os::reserve_memory(extra_size, NULL, alignment);
-        if (extra_base == NULL) return;
-        // Do manual alignement
-        base = (char*) align_size_up((uintptr_t) extra_base, alignment);
-        assert(base >= extra_base, "just checking");
-        // Re-reserve the region at the aligned base address.
-        os::release_memory(extra_base, extra_size);
-        base = os::reserve_memory(size, base);
-      } while (base == NULL);
+      base = os::reserve_memory_aligned(size, alignment);
if (requested_address != 0 &&
failed_to_reserve_as_requested(base, requested_address, size, false)) {
......
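Note that the loop deleted here is essentially the same release-and-retry scheme that now lives in the Windows os::reserve_memory_aligned above; ReservedSpace::initialize simply delegates to the new shared entry point, so each platform keeps its alignment strategy in one place.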
@@ -334,7 +334,7 @@ class MemBaseline : public _ValueObj {
// create a memory baseline
MemBaseline();
-  virtual ~MemBaseline();
+  ~MemBaseline();
inline bool baselined() const {
return _baselined;
......
@@ -90,7 +90,7 @@ protected:
NOT_PRODUCT(_name = name);
_counter = 0;
}
-  virtual ~AbstractGangTask() { }
+  ~AbstractGangTask() { }
public:
};
......
@@ -106,7 +106,7 @@ protected:
_status(INACTIVE),
_gang(NULL) { }
-  virtual ~YieldingFlexibleGangTask() { }
+  ~YieldingFlexibleGangTask() { }
friend class YieldingFlexibleWorkGang;
friend class YieldingFlexibleGangWorker;
......
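The three destructor changes above follow from the allocation.hpp change: once operator delete is private in _ValueObj/StackObj, a virtual destructor in a subclass no longer compiles, because the compiler-generated "deleting destructor" must reference an accessible operator delete even if nothing ever calls delete. A hedged sketch with invented names:

```cpp
#include <cstddef>

class Value {                       // stand-in for _ValueObj
 private:
  void* operator new(std::size_t size);
  void operator delete(void* p);
};

class Derived : public Value {      // stand-in for MemBaseline etc.
 public:
  // virtual ~Derived() { }         // typically rejected: the deleting destructor
  //                                // needs operator delete, which is inherited
  //                                // from Value but private there
  ~Derived() { }                    // non-virtual: no operator delete required
};
```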
/*
* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/* @test ExecuteInternalVMTests
* @bug 8004691
* @summary Add a jtreg test that exercises the ExecuteInternalVMTests flag
* @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+ExecuteInternalVMTests ExecuteInternalVMTests
*/
public class ExecuteInternalVMTests {
public static void main(String[] args) throws Exception {
// The tests that are run are the HotSpot internal tests which are
// executed only when the flag -XX:+ExecuteInternalVMTests is used.
// The flag -XX:+ExecuteInternalVMTests can only be used for
// non-product builds of HotSpot. Therefore, the flag
// -XX:+IgnoreUnrecognizedVMOptions is also used, which means that this
// test will do nothing on a product build.
}
}