Commit 8e20eb99 authored by Hao Tang, committed by Hao Tang

[MultiTenant] Support TenantHeapIsolation

Summary: ported heap isolation feature of MultiTenant to Dragonwell8

Test Plan: hotspot/test/multi-tenant/

Reviewed-by: luchsh, mmyxym

Issue: https://github.com/alibaba/dragonwell8/issues/90
Parent 0d8749d1
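Before the diff itself, a brief orientation may help: the patch routes every allocation through an "allocation context" that either points at a tenant's G1TenantAllocationContext or denotes the root (system) context, and the G1 allocator then picks the per-tenant or shared mutator region accordingly. The following is a minimal, hypothetical C++ sketch of that dispatch idea; the names (TenantCtx, Region, mutator_region) are simplified stand-ins, not the actual Dragonwell sources shown below.

// Illustrative sketch only; the real types live in g1AllocationContext.hpp and
// g1Allocator.cpp further down in this commit.
#include <cstddef>

struct Region { /* per-region allocation state */ };

struct TenantCtx {
  Region mutator_region;          // per-tenant mutator alloc region
  size_t heap_size_limit;         // cap on this tenant's heap usage
};

struct Context {
  TenantCtx* tenant;              // NULL means root/system context
  bool is_system() const { return tenant == nullptr; }
};

struct Allocator {
  Region root_mutator_region;     // shared region for the root tenant

  // Mirrors the shape of G1DefaultAllocator::mutator_alloc_region(): non-root
  // contexts get their own region, everything else falls back to the shared one.
  Region* mutator_region(const Context& ctx) {
    if (!ctx.is_system()) {
      return &ctx.tenant->mutator_region;
    }
    return &root_mutator_region;
  }
};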
@@ -285,7 +285,12 @@ SUNWprivate_1.1 {
# INSERT VTABLE SYMBOLS HERE
JVM_TenantContainerOf;
JVM_AttachToTenant;
JVM_CreateTenantAllocationContext;
JVM_DestroyTenantAllocationContext;
JVM_GetTenantOccupiedMemory;
local:
*;
......
@@ -280,7 +280,11 @@ SUNWprivate_1.1 {
# INSERT VTABLE SYMBOLS HERE
JVM_TenantContainerOf;
JVM_AttachToTenant;
JVM_CreateTenantAllocationContext;
JVM_DestroyTenantAllocationContext;
JVM_GetTenantOccupiedMemory;
local:
*;
......
@@ -52,6 +52,10 @@
#include "runtime/vframe.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
#endif // INCLUDE_ALL_GCS
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java) \
@@ -3247,6 +3251,13 @@ int java_lang_AssertionStatusDirectives::packages_offset;
int java_lang_AssertionStatusDirectives::packageEnabled_offset;
int java_lang_AssertionStatusDirectives::deflt_offset;
int java_nio_Buffer::_limit_offset;
#if INCLUDE_ALL_GCS
int com_alibaba_tenant_TenantContainer::_allocation_context_offset;
int com_alibaba_tenant_TenantContainer::_tenant_id_offset;
int com_alibaba_tenant_TenantContainer::_tenant_state_offset;
int com_alibaba_tenant_TenantState::_static_state_offsets[com_alibaba_tenant_TenantState::TS_SIZE] = { 0 };
#endif // INCLUDE_ALL_GCS
int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
int sun_reflect_ConstantPool::_oop_offset;
int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
@@ -3306,6 +3317,73 @@ void java_nio_Buffer::compute_offsets() {
compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
}
#if INCLUDE_ALL_GCS
// Support for com.alibaba.tenant.TenantContainer
void com_alibaba_tenant_TenantContainer::compute_offsets() {
Klass* k = SystemDictionary::com_alibaba_tenant_TenantContainer_klass();
assert(k != NULL, "Cannot find TenantContainer in current JDK");
compute_offset(_tenant_id_offset, k, vmSymbols::tenant_id_address(), vmSymbols::long_signature());
compute_offset(_allocation_context_offset, k, vmSymbols::allocation_context_address(), vmSymbols::long_signature());
compute_offset(_tenant_state_offset, k, vmSymbols::state_name(), vmSymbols::com_alibaba_tenant_TenantState_signature());
}
jlong com_alibaba_tenant_TenantContainer::get_tenant_id(oop obj) {
assert(obj != NULL, "TenantContainer object cannot be NULL");
return obj->long_field(_tenant_id_offset);
}
G1TenantAllocationContext* com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(oop obj) {
assert(obj != NULL, "TenantContainer object cannot be NULL");
return (G1TenantAllocationContext*)(obj->long_field(_allocation_context_offset));
}
void com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(oop obj, G1TenantAllocationContext* context) {
assert(obj != NULL, "TenantContainer object cannot be NULL");
obj->long_field_put(_allocation_context_offset, (jlong)context);
}
bool com_alibaba_tenant_TenantContainer::is_dead(oop obj) {
assert(obj != NULL, "TenantContainer object cannot be NULL");
int state = com_alibaba_tenant_TenantState::state_of(obj);
return state == com_alibaba_tenant_TenantState::TS_STOPPING
|| state == com_alibaba_tenant_TenantState::TS_DEAD;
}
oop com_alibaba_tenant_TenantContainer::get_tenant_state(oop obj) {
assert(obj != NULL, "TenantContainer object cannot be NULL");
return obj->obj_field(_tenant_state_offset);
}
// Support for com.alibaba.tenant.TenantState
int com_alibaba_tenant_TenantState::state_of(oop tenant_obj) {
assert(tenant_obj != NULL, "TenantContainer ");
oop tenant_state = com_alibaba_tenant_TenantContainer::get_tenant_state(tenant_obj);
InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::com_alibaba_tenant_TenantState_klass());
for (int i = TS_STARTING; i < TS_SIZE; ++i) {
assert(_static_state_offsets[i] == i * heapOopSize, "Must have been initialized");
address addr = ik->static_field_addr(_static_state_offsets[i]);
oop o = NULL;
if (UseCompressedOops) {
o = oopDesc::load_decode_heap_oop((narrowOop*)addr);
} else {
o = oopDesc::load_decode_heap_oop((oop*)addr);
}
assert(!oopDesc::is_null(o), "sanity");
if (tenant_state == o) {
return i;
}
}
ShouldNotReachHere();
}
#endif // INCLUDE_ALL_GCS
void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
if (_owner_offset != 0) return;
@@ -3415,6 +3493,10 @@ void JavaClasses::compute_offsets() {
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();
if(MultiTenant) {
com_alibaba_tenant_TenantContainer::compute_offsets();
}
}
#ifndef PRODUCT
......
@@ -1404,6 +1404,45 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
static oop get_owner_threadObj(oop obj);
};
#if INCLUDE_ALL_GCS
class G1TenantAllocationContext;
class com_alibaba_tenant_TenantContainer : AllStatic {
private:
static int _tenant_id_offset;
static int _allocation_context_offset;
static int _tenant_state_offset;
public:
static jlong get_tenant_id(oop obj);
static G1TenantAllocationContext* get_tenant_allocation_context(oop obj);
static void set_tenant_allocation_context(oop obj, G1TenantAllocationContext* context);
static oop get_tenant_state(oop obj);
static bool is_dead(oop obj);
static void compute_offsets();
};
class com_alibaba_tenant_TenantState : AllStatic {
friend class JavaClasses;
public:
// C++ level definition of tenant status
enum {
TS_STARTING = 0,
TS_RUNNING = 1,
TS_STOPPING = 2,
TS_DEAD = 3,
TS_SIZE,
};
private:
// offsets
static int _static_state_offsets[TS_SIZE];
public:
static int state_of(oop tenant_obj);
};
#endif // INCLUDE_ALL_GCS
// Use to declare fields that need to be injected into Java classes
// for the JVM to use. The name_index and signature_index are
// declared in vmSymbols. The may_be_java flag is used to declare
......
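The javaClasses changes above give the VM direct, offset-based access to fields of com.alibaba.tenant.TenantContainer. A hedged sketch of how those accessors would typically be used from VM code once compute_offsets() has run; the tenant oop here is hypothetical, only the accessor signatures come from the declarations above.

// Illustrative only: query a TenantContainer oop without a Java upcall.
// Assumes com_alibaba_tenant_TenantContainer::compute_offsets() already ran
// during JavaClasses::compute_offsets().
oop tenant = some_tenant_container_oop;   // hypothetical TenantContainer instance
jlong id = com_alibaba_tenant_TenantContainer::get_tenant_id(tenant);
G1TenantAllocationContext* ctx =
    com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(tenant);
if (!com_alibaba_tenant_TenantContainer::is_dead(tenant)) {
  // the tenant is still starting or running; ctx can be used for allocation
}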
@@ -890,7 +890,8 @@
do_intrinsic(_updateByteBufferCRC32, java_util_zip_CRC32, updateByteBuffer_name, updateByteBuffer_signature, F_SN) \
do_name( updateByteBuffer_name, "updateByteBuffer") \
do_signature(updateByteBuffer_signature, "(IJII)I") \
\
/* support for com.alibaba.tenant.TenantContainer */ \
do_name( allocation_context_address, "allocationContext") \
do_name( tenant_id_address, "tenantId") \
\
......
@@ -49,6 +49,11 @@ public:
_allocation_regions = allocation_regions;
}
void increment_allocation_regions(uint allocation_regions) {
assert(TenantHeapIsolation, "pre-condition");
_allocation_regions += allocation_regions;
}
void set_collectionset_used_before(size_t used) {
_collectionset_used_before = used;
}
@@ -61,6 +66,13 @@ public:
_alloc_regions_used_before = used;
}
// For multi-tenant mode, multiple calls to set_alloc_regions_used_before() may happen,
// thus change to below method to accumulate those results
void increment_alloc_regions_used_before(size_t used) {
assert(TenantHeapIsolation, "pre-condition");
_alloc_regions_used_before += used;
}
void set_bytes_copied(size_t copied) {
_bytes_copied = copied;
}
......
@@ -248,6 +248,11 @@ HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
DEBUG_ONLY(if (TenantHeapIsolation) {
assert(alloc_region->allocation_context() == allocation_context(),
"Inconsistent allocation contexts");
});
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
@@ -259,6 +264,11 @@ HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
DEBUG_ONLY(if (TenantHeapIsolation) {
assert(alloc_region->allocation_context() == allocation_context(),
"HeapRegion's context should be same as SurvivorGCAllocRegion's");
});
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}
@@ -270,6 +280,11 @@ HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
DEBUG_ONLY(if (TenantHeapIsolation) {
assert(alloc_region->allocation_context() == allocation_context(),
"HeapRegion's context should be same as OldGCAllocRegion's");
});
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}
......
@@ -144,8 +144,18 @@ public:
return (hr == _dummy_region) ? NULL : hr;
}
void set_allocation_context(AllocationContext_t context) {
_allocation_context = context;
}
const AllocationContext_t& allocation_context() const {
return _allocation_context;
}
const G1TenantAllocationContext* tenant_allocation_context() const {
assert(TenantHeapIsolation, "pre-condition");
return allocation_context().tenant_allocation_context();
}
uint count() { return _count; }
......
@@ -60,6 +60,15 @@ inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size,
HeapRegion* alloc_region = _alloc_region;
assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
DEBUG_ONLY(if (TenantHeapIsolation
/* (_alloc_region == _dummy_region) means current AllocRegion has not yet
* really initialized
*/
&& alloc_region != G1AllocRegion::_dummy_region) {
assert(allocation_context() == alloc_region->allocation_context(),
"Tring to allocate in the wrong heap region");
});
HeapWord* result = par_allocate(alloc_region, word_size, bot_updates);
if (result != NULL) {
trace("alloc", word_size, result);
@@ -82,6 +91,12 @@ inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
retire(true /* fill_up */);
result = new_alloc_region_and_allocate(word_size, false /* force */);
if (result != NULL) {
DEBUG_ONLY(if (TenantHeapIsolation) {
// _alloc_region was updated, check its tenant alloc context
assert(allocation_context() == _alloc_region->allocation_context(),
"Allocate in wrong region");
});
trace("alloc locked (second attempt)", word_size, result); trace("alloc locked (second attempt)", word_size, result);
return result; return result;
} }
...@@ -97,6 +112,12 @@ inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size, ...@@ -97,6 +112,12 @@ inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size,
trace("forcing alloc"); trace("forcing alloc");
HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */); HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);
if (result != NULL) { if (result != NULL) {
DEBUG_ONLY(if (TenantHeapIsolation) {
// _alloc_region was updated, check its tenant alloc context
assert(allocation_context() == _alloc_region->allocation_context(),
"Allocate in wrong region");
});
trace("alloc forced", word_size, result); trace("alloc forced", word_size, result);
return result; return result;
} }
......
/*
* Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Alibaba designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "runtime/thread.hpp"
// 0 will be returned if in ROOT tenant or memory isolation not enabled
AllocationContext_t AllocationContext::_root_context;
AllocationContext_t AllocationContext::current() {
return Thread::current()->allocation_context();
}
AllocationContext_t AllocationContext::system() {
return _root_context;
}
AllocationContextMark::AllocationContextMark(AllocationContext_t ctxt)
: _saved_context(Thread::current()->allocation_context()) {
Thread* thrd = Thread::current();
thrd->set_allocation_context(ctxt);
}
AllocationContextMark::~AllocationContextMark() {
Thread* thrd = Thread::current();
thrd->set_allocation_context(_saved_context);
}
@@ -26,19 +26,105 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
#include "memory/allocation.hpp"
#include "utilities/hashtable.hpp"
class G1TenantAllocationContext;
/*
* Typical scenario to use AllocationContext_t:
* <code>_g1h->_allocator->mutator_alloc_buffer(alloc_context)->attempt_allocation(...)</code>
*
* Here we just simply make AllocationContext_t to contain a pointer of
* G1TenantAllocationContext if TenantHeapIsolation enabled
*
*/
class AllocationContext_t VALUE_OBJ_CLASS_SPEC {
private:
union {
volatile G1TenantAllocationContext* _tenant_alloc_context; // Pointer to corresponding tenant allocation context
unsigned char _alloc_context; // unused, original value type from OpenJDK
} _value;
public:
AllocationContext_t(const uint64_t val) {
_value._tenant_alloc_context = (G1TenantAllocationContext*)val;
}
AllocationContext_t() { _value._tenant_alloc_context = NULL; }
AllocationContext_t(const AllocationContext_t& peer) : _value(peer._value) { }
AllocationContext_t(G1TenantAllocationContext* ctxt) {
Atomic::store_ptr(ctxt, &_value._tenant_alloc_context);
}
G1TenantAllocationContext* tenant_allocation_context() const {
assert(TenantHeapIsolation, "pre-condition");
return (G1TenantAllocationContext*)_value._tenant_alloc_context;
}
// This method is useless for now, since the original implementation does not differentiate
// the system and current allocation contexts.
// If Oracle makes any changes to AllocationContext_t in the future, please update the method below as well.
const unsigned char allocation_context() const { return 0; }
// Comparing the longest field in union
// operator ==
inline bool operator ==(const AllocationContext_t& ctxt) const {
return _value._tenant_alloc_context == ctxt._value._tenant_alloc_context;
}
inline bool operator ==(const G1TenantAllocationContext* tac) const {
return _value._tenant_alloc_context == tac;
}
inline bool operator ==(unsigned char alloc_context) const {
return _value._alloc_context == alloc_context;
}
// operator !=
inline bool operator !=(const AllocationContext_t& ctxt) const {
return _value._tenant_alloc_context != ctxt._value._tenant_alloc_context;
}
inline bool operator !=(const G1TenantAllocationContext* tac) const {
return _value._tenant_alloc_context != tac;
}
inline bool operator !=(unsigned char alloc_context) const {
return _value._alloc_context != alloc_context;
}
// operator =
inline AllocationContext_t& operator =(const AllocationContext_t& ctxt) {
Atomic::store_ptr((void*)ctxt._value._tenant_alloc_context, &_value._tenant_alloc_context);
return *this;
}
inline AllocationContext_t& operator =(const G1TenantAllocationContext* tac) {
Atomic::store_ptr(const_cast<G1TenantAllocationContext*>(tac), &_value._tenant_alloc_context);
return *this;
}
inline AllocationContext_t& operator =(unsigned char alloc_context) {
_value._alloc_context = alloc_context;
return *this;
}
inline const bool is_system() const { return NULL == _value._tenant_alloc_context; }
// to enable AllocationContext_t to be used as key type in HashMap
unsigned int hash_code() {
void *p = (void*)_value._tenant_alloc_context;
return HashMapUtil::hash(p);
}
inline G1TenantAllocationContext* operator -> () const {
return tenant_allocation_context();
}
};
class AllocationContext : AllStatic {
private:
static AllocationContext_t _root_context;
public:
// Currently used context
static AllocationContext_t current();
// System wide default context
static AllocationContext_t system();
};
class AllocationContextStats: public StackObj {
@@ -49,4 +135,13 @@ public:
inline bool available() { return false; }
};
// To switch current to target AllocationContext_t during the lifespan of this object
class AllocationContextMark : public StackObj {
private:
AllocationContext_t _saved_context;
public:
AllocationContextMark(AllocationContext_t ctxt);
~AllocationContextMark();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
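The AllocationContextMark declared above is a standard RAII guard: its constructor saves the calling thread's current allocation context and installs the target one, and its destructor restores the saved value. A hedged usage sketch follows; the function name and the tenant_ctxt value are hypothetical, only the mark itself comes from the header above.

// Illustrative only: run a scope of allocations under a tenant's context.
void allocate_for_tenant(AllocationContext_t tenant_ctxt) {
  AllocationContextMark acm(tenant_ctxt);   // saves current context, installs tenant_ctxt
  // ... any object allocation performed by this thread here is attributed
  //     to tenant_ctxt's G1TenantAllocationContext ...
}                                            // destructor restores the previous context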
@@ -30,21 +30,85 @@
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
void G1DefaultAllocator::init_mutator_alloc_region() {
if (TenantHeapIsolation) {
G1TenantAllocationContexts::init_mutator_alloc_regions();
}
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
_mutator_alloc_region.init();
}
void G1DefaultAllocator::release_mutator_alloc_region() {
if (TenantHeapIsolation) {
G1TenantAllocationContexts::release_mutator_alloc_regions();
}
_mutator_alloc_region.release();
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
MutatorAllocRegion* G1DefaultAllocator::mutator_alloc_region(AllocationContext_t context) {
if (TenantHeapIsolation && !context.is_system()) {
G1TenantAllocationContext* tac = context.tenant_allocation_context();
assert(NULL != tac, "Tenant alloc context cannot be NULL");
return tac->mutator_alloc_region();
}
return &_mutator_alloc_region;
}
SurvivorGCAllocRegion* G1DefaultAllocator::survivor_gc_alloc_region(AllocationContext_t context) {
if (TenantHeapIsolation && !context.is_system()) {
G1TenantAllocationContext* tac = context.tenant_allocation_context();
assert(NULL != tac, "Tenant alloc context cannot be NULL");
return tac->survivor_gc_alloc_region();
}
return &_survivor_gc_alloc_region;
}
OldGCAllocRegion* G1DefaultAllocator::old_gc_alloc_region(AllocationContext_t context) {
if (TenantHeapIsolation && !context.is_system()) {
G1TenantAllocationContext* tac = context.tenant_allocation_context();
assert(NULL != tac, "Tenant alloc context cannot be NULL");
return tac->old_gc_alloc_region();
}
return &_old_gc_alloc_region;
}
size_t G1DefaultAllocator::used() {
assert(Heap_lock->owner() != NULL,
"Should be owned on this thread's behalf.");
size_t result = _summary_bytes_used;
if (TenantHeapIsolation) {
// root tenant's
HeapRegion* hr = mutator_alloc_region(AllocationContext::system())->get();
if (NULL != hr) { result += hr->used(); }
result += G1TenantAllocationContexts::total_used();
} else {
// TenantHeapIsolation disabled mode
// Read only once in case it is set to NULL concurrently
HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
if (hr != NULL) {
result += hr->used();
}
}
return result;
}
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
AllocationContext_t context = old->allocation_context();
DEBUG_ONLY(if (TenantHeapIsolation && NULL != retained_region) {
assert(context == retained_region->allocation_context(),
"Inconsistent tenant alloc contexts");
});
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
// b) it's already full (no point in using it),
@@ -69,7 +133,13 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->_hr_printer.reuse(retained_region);
// Do accumulation in tenant mode, otherwise just set it
if (TenantHeapIsolation) {
evacuation_info.increment_alloc_regions_used_before(retained_region->used());
} else {
evacuation_info.set_alloc_regions_used_before(retained_region->used());
}
}
}
@@ -81,10 +151,22 @@ void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info)
reuse_retained_old_region(evacuation_info,
&_old_gc_alloc_region,
&_retained_old_gc_alloc_region);
if (TenantHeapIsolation) {
// for non-root tenants
G1TenantAllocationContexts::init_gc_alloc_regions(this, evacuation_info);
}
}
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
AllocationContext_t context = AllocationContext::current();
if (TenantHeapIsolation) {
// in non-tenant mode, system() == current(), AllocationContext::current() just works.
// but in tenant mode, we are trying to release all gc alloc regions from all tenants,
// thus explicitly overwrite the first operand context to system() like below.
context = AllocationContext::system();
}
evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
old_gc_alloc_region(context)->count());
survivor_gc_alloc_region(context)->release();
@@ -98,6 +180,11 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
_retained_old_gc_alloc_region->record_retained_region();
}
// Release GC alloc region for non-root tenants
if (TenantHeapIsolation) {
G1TenantAllocationContexts::release_gc_alloc_regions(evacuation_info);
}
if (ResizePLAB) {
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
_g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
@@ -105,9 +192,32 @@
}
void G1DefaultAllocator::abandon_gc_alloc_regions() {
DEBUG_ONLY(if (TenantHeapIsolation) {
// in non-tenant mode, system() == current(), AllocationContext::current() just works.
// but in tenant mode, we are trying to release all gc alloc regions from all tenants,
// thus explicitly overwrite the first operand context to system() like below.
assert(survivor_gc_alloc_region(AllocationContext::system())->get() == NULL, "pre-condition");
assert(old_gc_alloc_region(AllocationContext::system())->get() == NULL, "pre-condition");
} else {
// original logic, untouched
assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
});
_retained_old_gc_alloc_region = NULL;
if (TenantHeapIsolation) {
G1TenantAllocationContexts::abandon_gc_alloc_regions();
}
}
bool G1DefaultAllocator::is_retained_old_region(HeapRegion* hr) {
if (TenantHeapIsolation && NULL != hr && !hr->allocation_context().is_system()) {
G1TenantAllocationContext* tac = hr->allocation_context().tenant_allocation_context();
assert(NULL != tac, "pre-condition");
return tac->retained_old_gc_alloc_region() == hr;
}
return _retained_old_gc_alloc_region == hr;
}
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
@@ -138,15 +248,67 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
}
}
G1TenantParGCAllocBuffer::G1TenantParGCAllocBuffer(G1CollectedHeap* g1h,
AllocationContext_t ac)
: _allocation_context(ac)
, _g1h(g1h)
, _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young))
, _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
assert(TenantHeapIsolation, "pre-condition");
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
}
G1ParGCAllocBuffer* G1TenantParGCAllocBuffer::alloc_buffer(InCSetState dest) {
assert(TenantHeapIsolation, "pre-condition");
assert(dest.is_valid(), "just checking");
return _alloc_buffers[dest.value()];
}
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
_tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)),
_tenant_par_alloc_buffers(NULL) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
if (TenantHeapIsolation) {
_tenant_par_alloc_buffers = new TenantBufferMap(G1TenantAllocationContexts::active_context_count());
}
}
G1TenantParGCAllocBuffer* G1DefaultParGCAllocator::tenant_par_alloc_buffer_of(AllocationContext_t ac) {
assert(TenantHeapIsolation, "pre-condition");
// slow path to traverse over all tenant buffers
assert(NULL != _tenant_par_alloc_buffers, "just checking");
if (_tenant_par_alloc_buffers->contains(ac)) {
assert(NULL != _tenant_par_alloc_buffers->get(ac), "pre-condition");
return _tenant_par_alloc_buffers->get(ac)->value();
}
return NULL;
}
G1DefaultParGCAllocator::~G1DefaultParGCAllocator() {
if (TenantHeapIsolation) {
assert(NULL != _tenant_par_alloc_buffers, "just checking");
for (TenantBufferMap::Iterator itr = _tenant_par_alloc_buffers->begin();
itr != _tenant_par_alloc_buffers->end(); ++itr) {
assert(!itr->key().is_system(), "pre-condition");
G1TenantParGCAllocBuffer* tbuf = itr->value();
delete tbuf;
}
_tenant_par_alloc_buffers->clear();
delete _tenant_par_alloc_buffers;
}
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
@@ -158,5 +320,48 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
true /* end_of_gc */,
false /* retain */);
}
if (TenantHeapIsolation) {
assert(NULL != _tenant_par_alloc_buffers, "just checking");
// retire all non-root buffers
for (TenantBufferMap::Iterator itr = _tenant_par_alloc_buffers->begin();
itr != _tenant_par_alloc_buffers->end(); ++itr) {
assert(!itr->key().is_system(), "pre-condition");
G1TenantParGCAllocBuffer* tbuf = itr->value();
assert(NULL != tbuf, "pre-condition");
G1ParGCAllocBuffer* buffer = tbuf->alloc_buffer(state);
if (buffer != NULL) {
add_to_alloc_buffer_waste(buffer->words_remaining());
buffer->flush_stats_and_retire(_g1h->alloc_buffer_stats(state), true, false);
}
}
} else {
assert(NULL == _tenant_par_alloc_buffers, "just checking");
}
}
}
G1ParGCAllocBuffer* G1DefaultParGCAllocator::alloc_buffer(InCSetState dest, AllocationContext_t context) {
assert(dest.is_valid(),
err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
if (TenantHeapIsolation && !context.is_system()) {
assert(NULL != _tenant_par_alloc_buffers, "just checking");
G1TenantParGCAllocBuffer* tbuf = tenant_par_alloc_buffer_of(context);
if (NULL == tbuf) {
tbuf = new G1TenantParGCAllocBuffer(_g1h, context);
_tenant_par_alloc_buffers->put(context, tbuf);
}
assert(NULL != tbuf
&& NULL != _tenant_par_alloc_buffers->get(context)
&& tbuf == _tenant_par_alloc_buffers->get(context)->value(), "post-condition");
G1ParGCAllocBuffer* buf = tbuf->alloc_buffer(dest);
assert(NULL != buf, "post-condition");
return buf;
}
assert(_alloc_buffers[dest.value()] != NULL,
err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
return _alloc_buffers[dest.value()];
}
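The alloc_buffer() path above lazily creates one G1TenantParGCAllocBuffer per non-root allocation context and caches it in a per-worker hash map. Below is a small standalone sketch of that lazy-map pattern using stand-in names (TenantBuffers, WorkerAllocator) and std containers rather than the real HotSpot types, just to make the lookup-create-use flow explicit.

// Illustrative only; mirrors the "look up, create on miss, then use" flow of
// G1DefaultParGCAllocator::alloc_buffer() with simplified stand-in types.
#include <unordered_map>

struct TenantBuffers {
  // one PLAB per destination (young / old), as in G1TenantParGCAllocBuffer
  void* plab[2] = {nullptr, nullptr};
};

class WorkerAllocator {
  std::unordered_map<const void*, TenantBuffers*> _per_tenant;  // keyed by tenant context

public:
  TenantBuffers* buffers_for(const void* tenant_ctx) {
    auto it = _per_tenant.find(tenant_ctx);
    if (it == _per_tenant.end()) {
      // first allocation for this tenant on this worker: create lazily
      it = _per_tenant.emplace(tenant_ctx, new TenantBuffers()).first;
    }
    return it->second;
  }

  ~WorkerAllocator() {
    for (auto& kv : _per_tenant) delete kv.second;  // released per GC worker
  }
};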
@@ -29,6 +29,8 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
#include "utilities/hashtable.hpp"
#include "utilities/hashtable.inline.hpp"
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
@@ -114,34 +116,15 @@ public:
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
virtual void abandon_gc_alloc_regions();
virtual bool is_retained_old_region(HeapRegion* hr);

virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context);

virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context);

virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context);

virtual size_t used();
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@@ -209,6 +192,8 @@ public:
_alloc_buffer_waste(0), _undo_waste(0) {
}
virtual ~G1ParGCAllocator() { }
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
@@ -255,21 +240,51 @@ public:
}
};
class G1DefaultParGCAllocator;
// To encapsulate per-tenant ParGCAllocBuffer for G1DefaultParGCAllocator to use
// during GC pause.
// NOTE: thread local object
class G1TenantParGCAllocBuffer : public CHeapObj<mtTenant> {
friend class G1DefaultParGCAllocator;
private:
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
AllocationContext_t _allocation_context; // NOTE: used during GC, be careful with dereferencing
G1CollectedHeap* _g1h;
private:
G1TenantParGCAllocBuffer(G1CollectedHeap* g1h, AllocationContext_t ac);
G1ParGCAllocBuffer* alloc_buffer(InCSetState dest);
AllocationContext_t allocation_context() { return _allocation_context; }
void set_allocation_context(AllocationContext_t ac) { _allocation_context = ac; }
};
class G1DefaultParGCAllocator : public G1ParGCAllocator {
private:
// only for ROOT tenant if TenantHeapIsolation enabled
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
// Per-tenant par gc allocation buffers
typedef HashMap<AllocationContext_t, G1TenantParGCAllocBuffer*, mtTenant> TenantBufferMap;
TenantBufferMap* _tenant_par_alloc_buffers;
protected:
// returns tenant alloc buffer of target allocation context, NULL if not exist
G1TenantParGCAllocBuffer* tenant_par_alloc_buffer_of(AllocationContext_t ac);
public: public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);

virtual ~G1DefaultParGCAllocator();

virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context);

virtual void retire_alloc_buffers() ;
};
......
@@ -68,6 +68,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "gc_implementation/g1/elasticHeap.hpp"
#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -849,6 +850,14 @@ G1CollectedHeap::mem_allocate(size_t word_size,
} else {
result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
}
#ifndef PRODUCT
if (TenantHeapIsolation && TraceNonRootTenantAllocation && !AllocationContext::current().is_system()) {
tty->print_cr("Non-root allocation: " SIZE_FORMAT " bytes @0x" PTR_FORMAT " in tenant 0x" PTR_FORMAT,
word_size * HeapWordSize, result, AllocationContext::current().tenant_allocation_context());
}
#endif
if (result != NULL) {
return result;
}
@@ -1972,6 +1981,11 @@ jint G1CollectedHeap::initialize() {
// it will be used then.
_hr_printer.set_active(G1PrintHeapRegions);
// have to do this early before mutator_alloc_region initialization
if (TenantHeapIsolation) {
G1TenantAllocationContexts::initialize();
}
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some
@@ -7055,3 +7069,41 @@ void G1CollectedHeap::rebuild_strong_code_roots() {
RebuildStrongCodeRootClosure blob_cl(this);
CodeCache::blobs_do(&blob_cl);
}
void G1CollectedHeap::create_tenant_allocation_context(oop tenant_obj) {
assert(TenantHeapIsolation, "pre-condition");
assert(tenant_obj != NULL, "Tenant container object is null");
G1TenantAllocationContext* context = new (mtTenant) G1TenantAllocationContext(this);
assert(NULL != context, "Failed to create tenant context");
com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(tenant_obj, context);
context->set_tenant_container(tenant_obj);
}
void G1CollectedHeap::destroy_tenant_allocation_context(jlong context_val) {
assert(TenantHeapIsolation, "pre-condition");
G1TenantAllocationContext* context = (G1TenantAllocationContext*)context_val;
assert(NULL != context, "Delete an uninitialized tenant container");
oop tenant_obj = context->tenant_container();
assert(tenant_obj != NULL, "TenantContainer object cannot be NULL");
delete context;
com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(tenant_obj, NULL);
}
oop G1CollectedHeap::tenant_container_of(oop obj) {
assert(TenantHeapIsolation, "pre-condition");
if (obj != NULL) {
// Get: oop-> object address-> heap region -> tenant allocation context -> tenant obj
// assert obj
HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
if (NULL != hr) {
const G1TenantAllocationContext* context = hr->tenant_allocation_context();
if (NULL != context) {
return context->tenant_container();
}
}
}
return NULL;
}
@@ -35,6 +35,7 @@
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp" #include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp" #include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp" #include "gc_implementation/g1/heapRegionSet.hpp"
...@@ -1652,6 +1653,11 @@ public: ...@@ -1652,6 +1653,11 @@ public:
void print_cset_rsets() PRODUCT_RETURN; void print_cset_rsets() PRODUCT_RETURN;
void print_all_rsets() PRODUCT_RETURN; void print_all_rsets() PRODUCT_RETURN;
// Tenant allocation context manipulation
void create_tenant_allocation_context(oop tenant_obj);
void destroy_tenant_allocation_context(jlong context);
oop tenant_container_of(oop obj);
public:
size_t pending_card_num();
size_t cards_scanned();
......
@@ -77,6 +77,9 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
// Cannot guard below line with TenantHeapIsolation since we do not have conditional compilation for tenant mode
_gc_par_phases[TenantAllocationContextRoots] = new WorkerDataArray<double>(max_gc_threads, "G1TenantAllocationContext Roots (ms)", true, G1Log::LevelFinest, 3);
}
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
@@ -90,6 +93,8 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress
_gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
_gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
_gc_par_phases[TenantAllocationContextRoots]->set_enabled(TenantHeapIsolation);
}
void G1GCPhaseTimes::note_gc_end() {
......
@@ -43,6 +43,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
GCWorkerStart,
ExtRootScan,
ThreadRoots,
TenantAllocationContextRoots,
StringTableRoots,
UniverseRoots,
JNIRoots,
......
@@ -332,14 +332,67 @@ void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
dummy_free_list.remove_all();
}
bool G1PrepareCompactClosure::is_cp_initialized_for(AllocationContext_t ac) {
assert_at_safepoint(true /* in vm thread */);
assert(TenantHeapIsolation, "pre-condition");
if (ac.is_system()) {
return _root_cp.space != NULL;
}
G1TenantAllocationContext* tac = ac.tenant_allocation_context();
assert(NULL != tac, "Tenant alloc context cannot be NULL");
return tac->cached_compact_point().space != NULL;
}
void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
// If this is the first live region that we came across which we can compact,
// initialize the CompactPoint.
// Otherwise if TenantHeapIsolation enabled, just load saved CompactPoint from
// corresponding tenant context
if (TenantHeapIsolation) {
assert_at_safepoint(true /* in vm thread */);
AllocationContext_t ac = hr->allocation_context();
if (!is_cp_initialized_for(ac)) {
// first live region of this tenant
_cp.threshold = hr->initialize_threshold();
_cp.space = hr;
} else {
// if not the first time, should do switching
HeapRegion* cur_space = (HeapRegion*)_cp.space;
if (ac != cur_space->allocation_context()) {
// pick the corresponding saved compact point based on tenant alloc contexts
if (ac.is_system()) {
_cp = _root_cp;
} else {
G1TenantAllocationContext* tac = ac.tenant_allocation_context();
assert(NULL != tac, "just checking");
_cp = tac->cached_compact_point();
}
assert(NULL != _cp.space, "post-condition");
}
}
} else /* if (!TenantHeapIsolation) */ {
if (!is_cp_initialized()) {
// will be called only once during the whole iteration
_cp.space = hr;
_cp.threshold = hr->initialize_threshold();
}
}
prepare_for_compaction_work(&_cp, hr, end);
// save current CompactPoint to corresponding tenant context
if (TenantHeapIsolation) {
assert(NULL != _cp.space, "pre-condition");
HeapRegion* cur_space = (HeapRegion*)_cp.space;
if (cur_space->allocation_context().is_system()) {
_root_cp = _cp;
} else {
G1TenantAllocationContext* tac = cur_space->allocation_context().tenant_allocation_context();
assert(NULL != tac, "just checking");
tac->set_cached_compact_point(_cp);
}
}
}
void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
......
@@ -79,6 +79,7 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
CompactPoint _root_cp;
HeapRegionSetCount _humongous_regions_removed;
virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
@@ -86,6 +87,9 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
void free_humongous_region(HeapRegion* hr);
bool is_cp_initialized() const { return _cp.space != NULL; }
// check cp based on alloc context, this is to support TenantHeapIsolation
bool is_cp_initialized_for(AllocationContext_t ac);
public:
G1PrepareCompactClosure() :
_g1h(G1CollectedHeap::heap()),
......
@@ -26,6 +26,11 @@
#include "gc_implementation/g1/g1MarkSweep.hpp"
void G1MarkSweep::prepare_compaction() {
if (TenantHeapIsolation) {
// clear compaction dest info for all tenants
G1TenantAllocationContexts::prepare_for_compaction();
}
G1PrepareCompactClosure blk;
G1MarkSweep::prepare_compaction_work(&blk);
}
@@ -327,6 +327,14 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
SystemDictionary::roots_oops_do(strong_roots, weak_roots);
}
}
if (TenantHeapIsolation) {
// process references from G1TenantAllocationContext
G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::TenantAllocationContextRoots, worker_i);
if (!_process_strong_tasks.is_task_claimed(G1RP_PS_TenantAllocationContext_oops_do)) {
G1TenantAllocationContexts::oops_do(strong_roots);
}
}
}
void G1RootProcessor::process_string_table_roots(OopClosure* weak_roots, G1GCPhaseTimes* phase_times,
......
@@ -65,6 +65,7 @@ class G1RootProcessor : public StackObj {
G1RP_PS_CodeCache_oops_do,
G1RP_PS_filter_satb_buffers,
G1RP_PS_refProcessor_oops_do,
G1RP_PS_TenantAllocationContext_oops_do,
// Leave this one last.
G1RP_PS_NumElements
};
......
/*
* Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Alibaba designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "precompiled.hpp"
#include "runtime/thread.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/mutexLocker.hpp"
#include "memory/iterator.hpp"
#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
//----------------------- G1TenantAllocationContext ---------------------------
G1TenantAllocationContext::G1TenantAllocationContext(G1CollectedHeap* g1h)
: _g1h(g1h),
_occupied_heap_region_count(0),
_heap_size_limit(TENANT_HEAP_NO_LIMIT),
_heap_region_limit(0),
_tenant_container(NULL),
_retained_old_gc_alloc_region(NULL) {
assert(TenantHeapIsolation, "pre-condition");
// in current design we do not create G1TenantAllocationContext at safepoint
assert_not_at_safepoint();
#ifndef PRODUCT
if (TraceG1TenantAllocationContext) {
tty->print_cr("Create G1TenantAllocationContext: " PTR_FORMAT, p2i(this));
}
#endif
// init mutator allocator eagerly, because it may be used
// to allocate memory immediately after creation of tenant alloc context
_mutator_alloc_region.init();
AllocationContext_t ac(this);
_mutator_alloc_region.set_allocation_context(ac);
_survivor_gc_alloc_region.set_allocation_context(ac);
_old_gc_alloc_region.set_allocation_context(ac);
G1TenantAllocationContexts::add(this);
}
class ClearAllocationContextClosure : public HeapRegionClosure {
private:
AllocationContext_t _target;
public:
ClearAllocationContextClosure(AllocationContext_t ctxt) : _target(ctxt) {
assert(!_target.is_system(), "Cannot clear root tenant context");
}
virtual bool doHeapRegion(HeapRegion* region) {
assert(TenantHeapIsolation, "pre-condition");
assert(NULL != region, "Region cannot be NULL");
if (region->allocation_context() == _target) {
region->set_allocation_context(AllocationContext::system());
}
return false /* forcefully iterate over all regions */;
}
};
// clean up work that has to be done at safepoint
class DestroyG1TenantAllocationContextOperation : public VM_Operation {
private:
G1TenantAllocationContext* _context_to_destroy;
public:
DestroyG1TenantAllocationContextOperation(G1TenantAllocationContext* context)
: _context_to_destroy(context)
{
assert(TenantHeapIsolation, "pre-condition");
assert(_context_to_destroy != G1TenantAllocationContexts::system_context(),
"Should never destroy system context");
assert(!oopDesc::is_null(_context_to_destroy->tenant_container()), "sanity");
}
virtual void doit();
virtual VMOp_Type type() const { return VMOp_DestroyG1TenantAllocationContext; }
};
void DestroyG1TenantAllocationContextOperation::doit() {
assert_at_safepoint(true /* vm thread */);
if (UsePerTenantTLAB) {
assert(UseTLAB, "Sanity");
for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
thread->clean_tlab_for(_context_to_destroy);
}
}
// return any active mutator alloc region
MutatorAllocRegion* mar = _context_to_destroy->mutator_alloc_region();
HeapRegion* hr = mar->release();
assert(mar->get() == NULL, "post-condition");
if (hr != NULL) { // if this mutator region has been used
// 1, return mutator's heap region to root tenant;
// 2, after GC, objects still live in #1's heap region, will be
// owned by root
hr->set_allocation_context(AllocationContext::system());
}
// traverse all HeapRegions and update alloc contexts
AllocationContext_t ctxt(_context_to_destroy);
ClearAllocationContextClosure cl(ctxt);
G1CollectedHeap::heap()->heap_region_iterate(&cl);
G1TenantAllocationContexts::remove(_context_to_destroy);
com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(_context_to_destroy->tenant_container(),
G1TenantAllocationContexts::system_context());
#ifndef PRODUCT
if (TraceG1TenantAllocationContext) {
tty->print_cr("Destroy G1TenantAllocationContext:" PTR_FORMAT, p2i(_context_to_destroy));
}
#endif
}
G1TenantAllocationContext::~G1TenantAllocationContext() {
assert(TenantHeapIsolation, "pre-condition");
assert_not_at_safepoint();
DestroyG1TenantAllocationContextOperation vm_op(this);
VMThread::execute(&vm_op);
}
void G1TenantAllocationContext::inc_occupied_heap_region_count() {
assert(TenantHeapIsolation && occupied_heap_region_count() >= 0, "pre-condition");
assert(Heap_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint(), "not locked");
Atomic::inc_ptr(&_occupied_heap_region_count);
assert(occupied_heap_region_count() >= 1, "post-condition");
}
void G1TenantAllocationContext::dec_occupied_heap_region_count() {
assert(TenantHeapIsolation && occupied_heap_region_count() >= 1, "pre-condition");
assert(Heap_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint(), "not locked");
Atomic::dec_ptr(&_occupied_heap_region_count);
assert(occupied_heap_region_count() >= 0, "post-condition");
}
G1TenantAllocationContext* G1TenantAllocationContext::current() {
assert(TenantHeapIsolation, "pre-condition");
Thread* thrd = Thread::current();
assert(NULL != thrd, "Failed to get current thread");
return thrd->allocation_context().tenant_allocation_context();
}
size_t G1TenantAllocationContext::heap_bytes_to_region_num(size_t size_in_bytes) {
return heap_words_to_region_num(size_in_bytes >> LogBytesPerWord);
}
size_t G1TenantAllocationContext::heap_words_to_region_num(size_t size_in_words) {
assert(TenantHeapIsolation, "pre-condition");
return align_size_up_(size_in_words, HeapRegion::GrainWords) / HeapRegion::GrainWords;
}
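For a concrete sense of the rounding, the values below are illustrative only and not part of the change; they assume a hypothetical 2 MB region size (HeapRegion::GrainBytes == 2*M with 8-byte words), whereas the real grain is chosen ergonomically or via -XX:G1HeapRegionSize:
// Illustrative only, assuming 2 MB regions:
//   heap_bytes_to_region_num(0)          == 0  // size 0 maps to 0 regions (TENANT_HEAP_NO_LIMIT)
//   heap_bytes_to_region_num(2*M)        == 1  // exactly one region
//   heap_bytes_to_region_num(2*M + 4096) == 2  // a partially used region still counts as a whole one
//   heap_bytes_to_region_num(5*M)        == 3  // rounded up to whole regions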
//--------------------- G1TenantAllocationContexts ---------------------
G1TenantAllocationContexts::G1TenantACList* G1TenantAllocationContexts::_contexts = NULL;
Mutex* G1TenantAllocationContexts::_list_lock = NULL;
void G1TenantAllocationContexts::add(G1TenantAllocationContext* tac) {
assert(TenantHeapIsolation, "pre-condition");
if (NULL != tac) {
MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
_contexts->append(tac);
}
}
void G1TenantAllocationContexts::remove(G1TenantAllocationContext* tac) {
assert(TenantHeapIsolation, "pre-condition");
if (NULL != tac) {
MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
_contexts->remove(tac);
}
}
long G1TenantAllocationContexts::active_context_count() {
assert(TenantHeapIsolation, "pre-condition");
assert(NULL != "_contexts", "Tenant alloc context list not initialized");
MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
return _contexts->length();
}
void G1TenantAllocationContexts::iterate(G1TenantAllocationContextClosure* closure) {
assert(TenantHeapIsolation, "pre-condition");
assert(NULL != closure, "NULL closure pointer");
MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
for (GrowableArrayIterator<G1TenantAllocationContext*> itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
closure->do_tenant_allocation_context(*itr);
}
}
void G1TenantAllocationContexts::initialize() {
assert(TenantHeapIsolation, "pre-condition");
_contexts = new (ResourceObj::C_HEAP, mtTenant) G1TenantACList(128, true, mtTenant);
_list_lock = new Mutex(Mutex::leaf, "G1TenantAllocationContext list lock", true /* allow vm lock */);
}
void G1TenantAllocationContexts::prepare_for_compaction() {
assert(TenantHeapIsolation, "pre-condition");
assert_at_safepoint(true /* in vm thread */);
// no locking needed
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
assert(NULL != (*itr), "pre-condition");
(*itr)->_ccp.reset();
}
}
void G1TenantAllocationContexts::oops_do(OopClosure* f) {
assert(TenantHeapIsolation, "pre-condition");
assert(NULL != f, "OopClosure pointer is NULL");
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
assert(NULL != (*itr), "pre-condition");
f->do_oop(&((*itr)->_tenant_container));
}
}
void G1TenantAllocationContexts::init_mutator_alloc_regions() {
assert(TenantHeapIsolation, "pre-condition");
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
assert(NULL != (*itr), "pre-condition");
MutatorAllocRegion& mar = (*itr)->_mutator_alloc_region;
assert(mar.get() == NULL, "pre-condition");
mar.init();
}
}
void G1TenantAllocationContexts::release_mutator_alloc_regions() {
assert(TenantHeapIsolation, "pre-condition");
assert_at_safepoint(true /* in vm thread */);
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
assert(NULL != (*itr), "pre-condition");
MutatorAllocRegion& mar = (*itr)->_mutator_alloc_region;
mar.release();
assert(mar.get() == NULL, "post-condition");
}
}
size_t G1TenantAllocationContexts::total_used() {
assert(TenantHeapIsolation, "pre-condition");
size_t res = 0;
MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag); // take the lock; callers may not be at a safepoint
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
assert(NULL != (*itr), "pre-condition");
HeapRegion* hr = (*itr)->_mutator_alloc_region.get();
if (NULL != hr) {
res += hr->used();
}
}
return res;
}
void G1TenantAllocationContexts::init_gc_alloc_regions(G1Allocator* allocator, EvacuationInfo& ei) {
assert(TenantHeapIsolation, "pre-condition");
assert_at_safepoint(true /* in vm thread */);
assert(NULL != allocator, "Allocator cannot be NULL");
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
G1TenantAllocationContext* tac = (*itr);
assert(NULL != tac, "pre-condition");
SurvivorGCAllocRegion& survivor_region = tac->_survivor_gc_alloc_region;
OldGCAllocRegion& old_region = tac->_old_gc_alloc_region;
survivor_region.init();
old_region.init();
allocator->reuse_retained_old_region(ei, &old_region,
&(tac->_retained_old_gc_alloc_region));
}
}
void G1TenantAllocationContexts::release_gc_alloc_regions(EvacuationInfo& ei) {
assert(TenantHeapIsolation, "pre-condition");
assert_at_safepoint(true /* in vm thread */);
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
G1TenantAllocationContext* tac = (*itr);
assert(NULL != tac, "pre-condition");
SurvivorGCAllocRegion& survivor_region = tac->_survivor_gc_alloc_region;
OldGCAllocRegion& old_region = tac->_old_gc_alloc_region;
ei.increment_allocation_regions(survivor_region.count() + old_region.count());
survivor_region.release();
HeapRegion* retained_old = old_region.release();
tac->set_retained_old_gc_alloc_region(retained_old);
if (NULL != tac->retained_old_gc_alloc_region()) {
tac->retained_old_gc_alloc_region()->record_retained_region();
}
}
}
void G1TenantAllocationContexts::abandon_gc_alloc_regions() {
assert(TenantHeapIsolation, "pre-condition");
assert_at_safepoint(true /* in vm thread */);
for (G1TenantACListIterator itr = _contexts->begin();
itr != _contexts->end(); ++itr) {
G1TenantAllocationContext* tac = *itr;
assert(NULL != tac, "pre-condition");
assert(NULL == tac->_survivor_gc_alloc_region.get(), "pre-condition");
assert(NULL == tac->_old_gc_alloc_region.get(), "pre-condition");
(*itr)->set_retained_old_gc_alloc_region(NULL);
}
}
G1TenantAllocationContext* G1TenantAllocationContexts::system_context() {
assert(TenantHeapIsolation, "pre-condition");
return AllocationContext::system().tenant_allocation_context();
}
/*
* Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Alibaba designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_TENANT_CONTEXT_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_TENANT_CONTEXT_HPP
#include "gc_implementation/g1/g1AllocationContext.hpp"
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
#include "gc_implementation/g1/g1Allocator.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "runtime/handles.hpp"
#include "runtime/vm_operations.hpp"
class OopClosure;
class G1TenantAllocationContext;
class G1TenantAllocationContexts;
/*
* Closure to encapsulate operations to iterate over all G1TenantAllocationContext
*/
class G1TenantAllocationContextClosure : public Closure {
public:
virtual void do_tenant_allocation_context(G1TenantAllocationContext*) = 0;
};
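As a usage sketch (hypothetical, not part of this change), a closure that sums the per-tenant heap limits could be written as follows and handed to G1TenantAllocationContexts::iterate():
// Hypothetical example only: sum the configured heap limits of all tenants.
class SumHeapLimitClosure : public G1TenantAllocationContextClosure {
private:
  size_t _total;
public:
  SumHeapLimitClosure() : _total(0) { }
  virtual void do_tenant_allocation_context(G1TenantAllocationContext* tac) {
    assert(NULL != tac, "sanity");
    _total += tac->heap_size_limit();
  }
  size_t total() const { return _total; }
};
//
// SumHeapLimitClosure cl;
// G1TenantAllocationContexts::iterate(&cl);  // takes the context list lock internally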
// By default, no limit on newly created G1TenantAllocationContext
#define TENANT_HEAP_NO_LIMIT 0
/*
* G1TenantAllocationContext identifies a group of isolated Java heap regions associated with
* one TenantContainer.
*
* Only valid when -XX:+TenantHeapIsolation enabled
*
*/
class G1TenantAllocationContext : public CHeapObj<mtTenant> {
friend class VMStructs;
friend class G1TenantAllocationContexts;
private:
G1CollectedHeap* _g1h; // The only g1 heap instance
// Memory allocation related
MutatorAllocRegion _mutator_alloc_region; // mutator regions from young list
SurvivorGCAllocRegion _survivor_gc_alloc_region; // survivor region used during GC
OldGCAllocRegion _old_gc_alloc_region; // Old region used during GC
HeapRegion* _retained_old_gc_alloc_region; // the retained old region for this tenant
// HeapRegion throttling related
size_t _heap_size_limit; // user-defined max heap space for this tenant, in bytes
size_t _heap_region_limit; // user-defined max heap space for this tenant, in heap regions
size_t _occupied_heap_region_count; // number of regions occupied by this tenant
// Tenant alloc context list is now part of root set since each node
// keeps a strong reference to TenantContainer object for containerOf() API
oop _tenant_container; // handle to tenant container object
CachedCompactPoint _ccp; // cached CompactPoint during full GC compaction
public:
// A newly allocated G1TenantAllocationContext is appended to the tenant alloc context list
G1TenantAllocationContext(G1CollectedHeap* g1h);
virtual ~G1TenantAllocationContext();
MutatorAllocRegion* mutator_alloc_region() { return &_mutator_alloc_region; }
SurvivorGCAllocRegion* survivor_gc_alloc_region() { return &_survivor_gc_alloc_region; }
OldGCAllocRegion* old_gc_alloc_region() { return &_old_gc_alloc_region; }
HeapRegion* retained_old_gc_alloc_region() { return _retained_old_gc_alloc_region; }
void set_retained_old_gc_alloc_region(HeapRegion* hr) { _retained_old_gc_alloc_region = hr; }
// Get and set tenant container handle
oop tenant_container() const { return _tenant_container; }
void set_tenant_container(oop handle) { _tenant_container = handle; }
// get/set heap size limit
size_t heap_size_limit() const { return _heap_size_limit; }
void set_heap_size_limit(size_t new_size);
// get heap region limit, the size is calculated automatically
size_t heap_region_limit() const { return _heap_region_limit; }
// set/get occupied heap region count
void inc_occupied_heap_region_count();
void dec_occupied_heap_region_count();
size_t occupied_heap_region_count() { return _occupied_heap_region_count; }
// record the compact dest
const CachedCompactPoint& cached_compact_point() const { return _ccp; }
void set_cached_compact_point(CompactPoint cp) { _ccp = cp; }
// Retrieve pointer to current tenant context, NULL if in root container
static G1TenantAllocationContext* current();
private:
// calculate how many regions a given size will occupy:
// if 0 < size < HeapRegion::GrainWords (or GrainBytes), returns 1; if size == 0, returns 0
static size_t heap_bytes_to_region_num(size_t size_in_bytes);
static size_t heap_words_to_region_num(size_t size_in_words);
};
// To encapsulate operations for all existing tenants
class G1TenantAllocationContexts : public AllStatic {
friend class VMStructs;
public:
typedef GrowableArray<G1TenantAllocationContext*> G1TenantACList;
typedef GrowableArrayIterator<G1TenantAllocationContext*> G1TenantACListIterator;
private:
// NOTE: the two objects below are created on the C heap and never released
// during the JVM lifetime; this is intentional because they must exist as
// long as the Java heap does, and the Java heap is not destroyed until the
// JVM exits.
static G1TenantACList *_contexts; // list of all active tenant allocation contexts
static Mutex *_list_lock;
public:
static void add(G1TenantAllocationContext*);
static void remove(G1TenantAllocationContext*);
// initialize shared data
static void initialize();
// Get total number of active tenant containers
static long active_context_count();
// Perform operation upon all tenant alloc contexts
static void iterate(G1TenantAllocationContextClosure* closure);
// Prepare for full GC compaction
static void prepare_for_compaction();
// GC support, we keep a reference to the TenantContainer oop
static void oops_do(OopClosure* f);
static void init_mutator_alloc_regions();
static void release_mutator_alloc_regions();
static size_t total_used();
static void init_gc_alloc_regions(G1Allocator* allocator, EvacuationInfo& ei);
static void release_gc_alloc_regions(EvacuationInfo& ei);
static void abandon_gc_alloc_regions();
static G1TenantAllocationContext* system_context();
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_TENANT_CONTEXT_HPP
...@@ -328,7 +328,13 @@ ...@@ -328,7 +328,13 @@
"Verify the code root lists attached to each heap region.") \ "Verify the code root lists attached to each heap region.") \
\ \
develop(bool, G1VerifyBitmaps, false, \ develop(bool, G1VerifyBitmaps, false, \
"Verifies the consistency of the marking bitmaps") "Verifies the consistency of the marking bitmaps") \
\
develop(bool, TraceG1TenantAllocationContext, false, \
"Trace tenant allocation context changes") \
\
develop(bool, TraceNonRootTenantAllocation, false, \
"Trace memory allocated for non-root tenants") \
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG) G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
......
...@@ -319,7 +319,26 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) { ...@@ -319,7 +319,26 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
CompactibleSpace* HeapRegion::next_compaction_space() const { CompactibleSpace* HeapRegion::next_compaction_space() const {
return G1CollectedHeap::heap()->next_compaction_region(this); if (TenantHeapIsolation) {
assert_at_safepoint(true /* in vm thread */);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
assert(NULL != g1h, "g1h cannot be NULL");
HeapRegion* hr = g1h->next_compaction_region(this);
while (NULL != hr) {
assert(!hr->isHumongous(), "just checking");
if (hr->allocation_context() == allocation_context()) {
return hr;
}
hr = g1h->next_compaction_region(hr);
}
// The worst case is to return 'this', cannot be NULL
assert(NULL != hr, "post-condition");
return hr;
} else {
return G1CollectedHeap::heap()->next_compaction_region(this);
}
} }
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark, void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
...@@ -642,7 +661,15 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const ...@@ -642,7 +661,15 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
void HeapRegion::print() const { print_on(gclog_or_tty); } void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const { void HeapRegion::print_on(outputStream* st) const {
st->print("AC%4u", allocation_context()); if (TenantHeapIsolation) {
if (NULL == tenant_allocation_context()) {
st->print(" TENANT-ROOT");
} else {
assert(!allocation_context().is_system(), "Inconsistent allocation contexts");
st->print(" TENANT-" PTR_FORMAT, allocation_context().tenant_allocation_context());
}
}
st->print(" AC%4u", allocation_context().allocation_context());
st->print(" %2s", get_short_type_str()); st->print(" %2s", get_short_type_str());
if (in_collection_set()) if (in_collection_set())
st->print(" CS"); st->print(" CS");
...@@ -989,6 +1016,29 @@ void HeapRegion::verify(VerifyOption vo, ...@@ -989,6 +1016,29 @@ void HeapRegion::verify(VerifyOption vo,
verify_strong_code_roots(vo, failures); verify_strong_code_roots(vo, failures);
} }
void HeapRegion::set_allocation_context(AllocationContext_t context) {
assert(Heap_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint(), "not locked");
if (TenantHeapIsolation && context != allocation_context() /* do not count self-set */) {
if (context.is_system()) {
assert(!allocation_context().is_system(), "pre-condition");
G1TenantAllocationContext* tac = allocation_context().tenant_allocation_context();
assert(NULL != tac, "pre-condition");
tac->dec_occupied_heap_region_count();
} else {
assert(allocation_context().is_system(), "pre-condition");
G1TenantAllocationContext* tac = context.tenant_allocation_context();
assert(NULL != tac, "pre-condition");
tac->inc_occupied_heap_region_count();
}
} else {
DEBUG_ONLY(assert(!TenantHeapIsolation
|| (context.is_system() && allocation_context().is_system()), "just checking"));
}
_allocation_context = context;
}
void HeapRegion::verify() const { void HeapRegion::verify() const {
bool dummy = false; bool dummy = false;
verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy); verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
......
...@@ -350,6 +350,12 @@ class HeapRegion: public G1OffsetTableContigSpace { ...@@ -350,6 +350,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
// All allocated blocks are occupied by objects in a HeapRegion // All allocated blocks are occupied by objects in a HeapRegion
bool block_is_obj(const HeapWord* p) const; bool block_is_obj(const HeapWord* p) const;
// Get and set tenant allocation context of this heap region
const G1TenantAllocationContext* tenant_allocation_context() const {
assert(TenantHeapIsolation, "pre-condition");
return allocation_context().tenant_allocation_context();
}
// Returns the object size for all valid block starts // Returns the object size for all valid block starts
// and the amount of unallocated words if called on top() // and the amount of unallocated words if called on top()
size_t block_size(const HeapWord* p) const; size_t block_size(const HeapWord* p) const;
...@@ -512,11 +518,9 @@ class HeapRegion: public G1OffsetTableContigSpace { ...@@ -512,11 +518,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r; _next_in_special_set = r;
} }
void set_allocation_context(AllocationContext_t context) { void set_allocation_context(AllocationContext_t context);
_allocation_context = context;
}
AllocationContext_t allocation_context() const { const AllocationContext_t& allocation_context() const {
return _allocation_context; return _allocation_context;
} }
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
#include "gc_implementation/g1/heapRegion.hpp" #include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp" #include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
#define VM_STRUCTS_G1(nonstatic_field, static_field) \ #define VM_STRUCTS_G1(nonstatic_field, static_field) \
\ \
...@@ -64,6 +65,10 @@ ...@@ -64,6 +65,10 @@
\ \
nonstatic_field(HeapRegionSetCount, _length, uint) \ nonstatic_field(HeapRegionSetCount, _length, uint) \
nonstatic_field(HeapRegionSetCount, _capacity, size_t) \ nonstatic_field(HeapRegionSetCount, _capacity, size_t) \
nonstatic_field(G1TenantAllocationContext, _heap_size_limit, size_t) \
nonstatic_field(G1TenantAllocationContext, _heap_region_limit, size_t) \
nonstatic_field(G1TenantAllocationContext, _occupied_heap_region_count, size_t) \
nonstatic_field(G1TenantAllocationContext, _tenant_container, oop) \
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \ #define VM_TYPES_G1(declare_type, declare_toplevel_type) \
...@@ -84,6 +89,8 @@ ...@@ -84,6 +89,8 @@
declare_toplevel_type(HeapRegion*) \ declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \ declare_toplevel_type(G1MonitoringSupport*) \
declare_toplevel_type(G1Allocator*) \ declare_toplevel_type(G1Allocator*) \
declare_toplevel_type(G1TenantAllocationContext) \
declare_toplevel_type(G1TenantAllocationContext*) \
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP #endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
...@@ -94,6 +94,8 @@ void VM_G1IncCollectionPause::doit() { ...@@ -94,6 +94,8 @@ void VM_G1IncCollectionPause::doit() {
"only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle"); "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
if (_word_size > 0) { if (_word_size > 0) {
AllocationContextMark acm(this->allocation_context());
// An allocation has been requested. So, try to do that first. // An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(), _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
false /* expect_null_cur_alloc_region */); false /* expect_null_cur_alloc_region */);
...@@ -147,6 +149,7 @@ void VM_G1IncCollectionPause::doit() { ...@@ -147,6 +149,7 @@ void VM_G1IncCollectionPause::doit() {
_pause_succeeded = _pause_succeeded =
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms); g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) { if (_pause_succeeded && _word_size > 0) {
AllocationContextMark acm(this->allocation_context());
// An allocation had been requested. // An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(), _result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
true /* expect_null_cur_alloc_region */); true /* expect_null_cur_alloc_region */);
......
...@@ -525,7 +525,16 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) { ...@@ -525,7 +525,16 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
"Attempt to fill tlabs before main thread has been added" "Attempt to fill tlabs before main thread has been added"
" to threads list is doomed to failure!"); " to threads list is doomed to failure!");
for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) { for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
if (use_tlab) thread->tlab().make_parsable(retire_tlabs); if (use_tlab) {
if (UsePerTenantTLAB) {
thread->make_all_tlabs_parsable(retire_tlabs,
// do not delete saved TLABs, so that the per-thread, per-tenant
// TLAB adaptive sizing policy stays in effect
false /* delete saved TLABs */);
} else {
thread->tlab().make_parsable(retire_tlabs);
}
}
#ifdef COMPILER2 #ifdef COMPILER2
// The deferred store barriers must all have been flushed to the // The deferred store barriers must all have been flushed to the
// card-table (or other remembered set structure) before GC starts // card-table (or other remembered set structure) before GC starts
......
...@@ -154,8 +154,9 @@ enum MemoryType { ...@@ -154,8 +154,9 @@ enum MemoryType {
mtChunk = 0x0C, // chunk that holds content of arenas mtChunk = 0x0C, // chunk that holds content of arenas
mtTest = 0x0D, // Test type for verifying NMT mtTest = 0x0D, // Test type for verifying NMT
mtTracing = 0x0E, // memory used for Tracing mtTracing = 0x0E, // memory used for Tracing
mtNone = 0x0F, // undefined mtTenant = 0x0F, // memory used by MultiTenant code
mt_number_of_types = 0x10 // number of memory types (mtDontTrack mtNone = 0x10, // undefined
mt_number_of_types = 0x11 // number of memory types (mtDontTrack
// is not included as validate type) // is not included as validate type)
}; };
......
...@@ -45,6 +45,27 @@ ...@@ -45,6 +45,27 @@
#include "utilities/globalDefinitions.hpp" #include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp" #include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
CompactPoint& CompactPoint::operator = (const CompactPoint& cp) {
space = cp.space;
threshold = cp.threshold;
gen = cp.gen;
return *this;
}
CompactPoint& CompactPoint::operator = (const CachedCompactPoint& ccp) {
space = ccp.space;
threshold = ccp.threshold;
return *this;
}
CachedCompactPoint& CachedCompactPoint::operator = (const CompactPoint& cp) {
space = cp.space;
threshold = cp.threshold;
return *this;
}
#endif
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
......
...@@ -323,6 +323,10 @@ public: ...@@ -323,6 +323,10 @@ public:
#endif #endif
}; };
#if INCLUDE_ALL_GCS
class CachedCompactPoint;
#endif
// A structure to represent a point at which objects are being copied // A structure to represent a point at which objects are being copied
// during compaction. // during compaction.
class CompactPoint : public StackObj { class CompactPoint : public StackObj {
...@@ -333,8 +337,29 @@ public: ...@@ -333,8 +337,29 @@ public:
CompactPoint(Generation* g = NULL) : CompactPoint(Generation* g = NULL) :
gen(g), space(NULL), threshold(0) {} gen(g), space(NULL), threshold(0) {}
#if INCLUDE_ALL_GCS
CompactPoint& operator = (const CompactPoint&);
CompactPoint& operator = (const CachedCompactPoint&);
#endif
};
#if INCLUDE_ALL_GCS
// To cache CompactPoint info on C heap
class CachedCompactPoint : public CHeapObj<mtTenant> {
public:
// Generation* is not used by G1, thus no need to cache it
CompactibleSpace* space; // cached space
HeapWord* threshold; // cached threshold
public:
CachedCompactPoint() : space(NULL), threshold(NULL) {}
void reset() { space = NULL; threshold = NULL; }
CachedCompactPoint& operator = (const CompactPoint&);
}; };
#endif
// A space that supports compaction operations. This is usually, but not // A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a // necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without // free-list-based space whose normal collection is a mark-sweep without
......
...@@ -209,6 +209,12 @@ void ThreadLocalAllocBuffer::initialize() { ...@@ -209,6 +209,12 @@ void ThreadLocalAllocBuffer::initialize() {
_allocation_fraction.sample(alloc_frac); _allocation_fraction.sample(alloc_frac);
} }
#if INCLUDE_ALL_GCS
if (UsePerTenantTLAB) {
_my_thread = Thread::current();
}
#endif
set_refill_waste_limit(initial_refill_waste_limit()); set_refill_waste_limit(initial_refill_waste_limit());
initialize_statistics(); initialize_statistics();
...@@ -287,11 +293,39 @@ void ThreadLocalAllocBuffer::verify() { ...@@ -287,11 +293,39 @@ void ThreadLocalAllocBuffer::verify() {
} }
Thread* ThreadLocalAllocBuffer::myThread() { Thread* ThreadLocalAllocBuffer::myThread() {
#if INCLUDE_ALL_GCS
if (UsePerTenantTLAB) {
return _my_thread;
}
#endif
return (Thread*)(((char *)this) + return (Thread*)(((char *)this) +
in_bytes(start_offset()) - in_bytes(start_offset()) -
in_bytes(Thread::tlab_start_offset())); in_bytes(Thread::tlab_start_offset()));
} }
#if INCLUDE_ALL_GCS
void ThreadLocalAllocBuffer::swap_content(ThreadLocalAllocBuffer* peer) {
assert(UseG1GC && TenantHeapIsolation
&& UsePerTenantTLAB && peer != NULL, "sanity");
assert(peer->myThread() == this->myThread()
&& (Thread::current() == this->myThread() || SafepointSynchronize::is_at_safepoint()),
"only for self thread");
// do swapping
unsigned char buf[sizeof(ThreadLocalAllocBuffer)];
memcpy(buf, this, sizeof(ThreadLocalAllocBuffer));
memcpy(this, peer, sizeof(ThreadLocalAllocBuffer));
memcpy(peer, buf, sizeof(ThreadLocalAllocBuffer));
// restore linkage info
ThreadLocalAllocBuffer* tmp_next = this->next();
this->set_next(peer->next());
peer->set_next(tmp_next);
}
#endif // #if INCLUDE_ALL_GCS
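The memcpy-based swap above, followed by restoring the _next links, presumably keeps the TLAB embedded in the Thread object as the one currently being allocated into, so existing fast-path code that locates the active TLAB at a fixed offset inside Thread keeps working, while TLABs retained for other tenants live behind it on the C heap (see the _my_thread comment in the header below).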
GlobalTLABStats::GlobalTLABStats() : GlobalTLABStats::GlobalTLABStats() :
_allocating_threads_avg(TLABAllocationWeight) { _allocating_threads_avg(TLABAllocationWeight) {
......
...@@ -31,6 +31,10 @@ ...@@ -31,6 +31,10 @@
class GlobalTLABStats; class GlobalTLABStats;
#if INCLUDE_ALL_GCS
class G1TenantAllocationContext;
#endif
// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by // ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
// the threads for allocation. // the threads for allocation.
// It is thread-private at any time, but maybe multiplexed over // It is thread-private at any time, but maybe multiplexed over
...@@ -101,7 +105,14 @@ private: ...@@ -101,7 +105,14 @@ private:
static GlobalTLABStats* global_stats() { return _global_stats; } static GlobalTLABStats* global_stats() { return _global_stats; }
public: public:
ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) { ThreadLocalAllocBuffer()
: _allocation_fraction(TLABAllocationWeight),
#if INCLUDE_ALL_GCS
_next(NULL),
_tenant_context(NULL),
_my_thread(NULL),
#endif // #if INCLUDE_ALL_GCS
_allocated_before_last_gc(0) {
// do nothing. tlabs must be inited by initialize() calls // do nothing. tlabs must be inited by initialize() calls
} }
...@@ -173,6 +184,31 @@ public: ...@@ -173,6 +184,31 @@ public:
static ByteSize slow_allocations_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _slow_allocations ); } static ByteSize slow_allocations_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _slow_allocations ); }
void verify(); void verify();
#if INCLUDE_ALL_GCS
private:
// retained TLAB support
G1TenantAllocationContext* _tenant_context; // Tenant allocation context associated with this TLAB
ThreadLocalAllocBuffer* _next; // linked to next TLAB
//
// This is slightly redundant, but we have to keep a pointer to the owner thread
// when -XX:+UsePerTenantTLAB is specified. In that case a TLAB object may be
// allocated directly on the C heap instead of being embedded in the Thread object,
// which means the owner Thread address can no longer be computed from
// 'cur_tlab->_start + Thread::tlab_start_offset()'.
//
Thread* _my_thread;
public:
G1TenantAllocationContext* tenant_allocation_context() const { return _tenant_context; }
void set_tenant_allocation_context(G1TenantAllocationContext* context) { _tenant_context = context; }
// linked list of retained per-tenant TLABs
ThreadLocalAllocBuffer* next() const { return _next; }
void set_next(ThreadLocalAllocBuffer* next) { _next = next; }
// swap contents with another TLAB
void swap_content(ThreadLocalAllocBuffer* peer);
#endif // #if INCLUDE_ALL_GCS
}; };
class GlobalTLABStats: public CHeapObj<mtThread> { class GlobalTLABStats: public CHeapObj<mtThread> {
......
...@@ -3876,13 +3876,51 @@ JVM_END ...@@ -3876,13 +3876,51 @@ JVM_END
/***************** Tenant support ************************************/ /***************** Tenant support ************************************/
JVM_ENTRY(void, JVM_AttachToTenant(JNIEnv *env, jobject tenant)) JVM_ENTRY(jobject, JVM_TenantContainerOf(JNIEnv* env, jclass tenantContainerClass, jobject obj))
JVMWrapper("JVM_TenantContainerOf");
assert(MultiTenant && TenantHeapIsolation, "pre-condition");
if (NULL != obj) {
oop container = G1CollectedHeap::heap()->tenant_container_of(JNIHandles::resolve_non_null(obj));
if (container != NULL) {
return JNIHandles::make_local(env, container);
}
}
return NULL;
JVM_END
JVM_ENTRY(void, JVM_AttachToTenant(JNIEnv *env, jobject ignored, jobject tenant))
JVMWrapper("JVM_AttachToTenant"); JVMWrapper("JVM_AttachToTenant");
assert(MultiTenant, "pre-condition"); assert(MultiTenant, "pre-condition");
assert (NULL != thread, "no current thread!"); assert (NULL != thread, "no current thread!");
thread->set_tenantObj(tenant == NULL ? (oop)NULL : JNIHandles::resolve_non_null(tenant)); thread->set_tenantObj(tenant == NULL ? (oop)NULL : JNIHandles::resolve_non_null(tenant));
JVM_END JVM_END
JVM_ENTRY(void, JVM_CreateTenantAllocationContext(JNIEnv *env, jobject ignored, jobject tenant, jlong heapLimit))
JVMWrapper("JVM_CreateTenantAllocationContext");
guarantee(UseG1GC && TenantHeapIsolation, "pre-condition");
oop tenant_obj = JNIHandles::resolve_non_null(tenant);
assert(tenant_obj != NULL, "Cannot create allocation context for a null tenant container");
G1CollectedHeap::heap()->create_tenant_allocation_context(tenant_obj);
JVM_END
// This method should be called before the Java TenantContainer object is reclaimed
JVM_ENTRY(void, JVM_DestroyTenantAllocationContext(JNIEnv *env, jobject ignored, jlong context))
JVMWrapper("JVM_DestroyTenantAllocationContext");
assert(UseG1GC && TenantHeapIsolation, "pre-condition");
oop tenant_obj = ((G1TenantAllocationContext*)context)->tenant_container();
assert(tenant_obj != NULL, "Cannot destroy allocation context from a null tenant container");
G1CollectedHeap::heap()->destroy_tenant_allocation_context(context);
JVM_END
JVM_ENTRY(jlong, JVM_GetTenantOccupiedMemory(JNIEnv* env, jobject ignored, jlong context))
JVMWrapper("JVM_GetTenantOccupiedMemory");
assert(UseG1GC && TenantHeapIsolation, "pre-condition");
G1TenantAllocationContext* alloc_context = (G1TenantAllocationContext*)context;
assert(alloc_context != NULL, "Bad allocation context!");
assert(alloc_context->tenant_container() != NULL, "NULL tenant container");
return (alloc_context->occupied_heap_region_count() * HeapRegion::GrainBytes);
JVM_END
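Note that JVM_GetTenantOccupiedMemory reports occupancy at heap-region granularity rather than live bytes; illustrative only, assuming a hypothetical 2 MB region size:
// occupied_heap_region_count() == 3  =>  JVM_GetTenantOccupiedMemory(...) == 6*M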
// Array /////////////////////////////////////////////////////////////////////////////////////////// // Array ///////////////////////////////////////////////////////////////////////////////////////////
......
...@@ -361,7 +361,19 @@ JVM_LoadClass0(JNIEnv *env, jobject obj, jclass currClass, ...@@ -361,7 +361,19 @@ JVM_LoadClass0(JNIEnv *env, jobject obj, jclass currClass,
* com.alibaba.tenant.TenantContainer * com.alibaba.tenant.TenantContainer
*/ */
JNIEXPORT void JNICALL JNIEXPORT void JNICALL
JVM_AttachToTenant(JNIEnv *env, jobject tenant); JVM_AttachToTenant(JNIEnv *env, jobject ignored, jobject tenant);
JNIEXPORT void JNICALL
JVM_CreateTenantAllocationContext(JNIEnv *env, jobject ignored, jobject tenant, jlong heapLimit);
JNIEXPORT void JNICALL
JVM_DestroyTenantAllocationContext(JNIEnv *env, jobject ignored, jlong context);
JNIEXPORT jobject JNICALL
JVM_TenantContainerOf(JNIEnv *env, jclass tenantContainerClass, jobject obj);
JNIEXPORT jlong JNICALL
JVM_GetTenantOccupiedMemory(JNIEnv *env, jobject ignored, jlong context);
/* /*
* java.lang.reflect.Array * java.lang.reflect.Array
......
...@@ -28,6 +28,7 @@ ...@@ -28,6 +28,7 @@
*/ */
#define TENANT_FLAG_MULTI_TENANT_ENABLED (0x1) // bit 0 to indicate if the tenant feature is enabled. #define TENANT_FLAG_MULTI_TENANT_ENABLED (0x1) // bit 0 to indicate if the tenant feature is enabled.
#define TENANT_FLAG_HEAP_ISOLATION_ENABLED (0x80) // bit 7 to indicate if heap isolation feature is enabled.
static jint tenant_GetTenantFlags(TenantEnv *env, jclass cls); static jint tenant_GetTenantFlags(TenantEnv *env, jclass cls);
...@@ -49,5 +50,9 @@ tenant_GetTenantFlags(TenantEnv *env, jclass cls) ...@@ -49,5 +50,9 @@ tenant_GetTenantFlags(TenantEnv *env, jclass cls)
result |= TENANT_FLAG_MULTI_TENANT_ENABLED; result |= TENANT_FLAG_MULTI_TENANT_ENABLED;
} }
if (TenantHeapIsolation) {
result |= TENANT_FLAG_HEAP_ISOLATION_ENABLED;
}
return result; return result;
} }
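A caller on the launcher/JDK side would presumably decode the returned bit mask along these lines (hypothetical sketch, not part of the change):
/* Hypothetical caller-side decoding of the flag bits, not part of the change. */
jint flags = tenant_GetTenantFlags(env, cls);
int multi_tenant_enabled   = (flags & TENANT_FLAG_MULTI_TENANT_ENABLED) != 0;   /* bit 0 */
int heap_isolation_enabled = (flags & TENANT_FLAG_HEAP_ISOLATION_ENABLED) != 0; /* bit 7 */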
...@@ -1403,6 +1403,17 @@ WB_ENTRY(jboolean, WB_TestFixDanglingPointerInDeopt(JNIEnv* env, jobject o, jstr ...@@ -1403,6 +1403,17 @@ WB_ENTRY(jboolean, WB_TestFixDanglingPointerInDeopt(JNIEnv* env, jobject o, jstr
} }
return true; return true;
WB_END WB_END
WB_ENTRY(jboolean, WB_IsInCurrentTLAB(JNIEnv* env, jobject wb, jobject o))
ThreadToNativeFromVM ttn(thread);
if (o != NULL) {
HeapWord* addr = (HeapWord*)JNIHandles::resolve_non_null(o);
ThreadLocalAllocBuffer& tlab = Thread::current()->tlab();
return (addr >= tlab.start() && addr < tlab.end()) ? JNI_TRUE : JNI_FALSE;
}
return JNI_FALSE;
WB_END
#define CC (char*) #define CC (char*)
static JNINativeMethod methods[] = { static JNINativeMethod methods[] = {
...@@ -1538,7 +1549,8 @@ static JNINativeMethod methods[] = { ...@@ -1538,7 +1549,8 @@ static JNINativeMethod methods[] = {
{CC"testFixDanglingPointerInDeopt", {CC"testFixDanglingPointerInDeopt",
CC"(Ljava/lang/String;)Z", (void*)&WB_TestFixDanglingPointerInDeopt}, CC"(Ljava/lang/String;)Z", (void*)&WB_TestFixDanglingPointerInDeopt},
{CC"getClassInitOrderList", CC"()[Ljava/lang/String;", {CC"getClassInitOrderList", CC"()[Ljava/lang/String;",
(void*)&WB_GetClassInitOrderList } (void*)&WB_GetClassInitOrderList },
{CC"isInCurrentTLAB", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsInCurrentTLAB },
}; };
#undef CC #undef CC
......
/*
* Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Alibaba designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
#include "runtime/arguments_ext.hpp"
#include "runtime/java.hpp"
void ArgumentsExt::set_tenant_flags() {
// TenantHeapIsolation directly depends on MultiTenant, UseG1GC
if (TenantHeapIsolation) {
if (FLAG_IS_DEFAULT(MultiTenant)) {
FLAG_SET_ERGO(bool, MultiTenant, true);
}
if (UseTLAB && FLAG_IS_DEFAULT(UsePerTenantTLAB)) {
// enable per-tenant TLABs if unspecified and heap isolation is enabled
FLAG_SET_ERGO(bool, UsePerTenantTLAB, true);
}
// check GC policy compatibility
if (!UseG1GC) {
vm_exit_during_initialization("-XX:+TenantHeapIsolation only works with -XX:+UseG1GC");
}
if (!MultiTenant) {
vm_exit_during_initialization("Cannot use multi-tenant features if -XX:-MultiTenant specified");
}
}
// UsePerTenantTLAB depends on TenantHeapIsolation and UseTLAB
if (UsePerTenantTLAB) {
if (!TenantHeapIsolation || !UseTLAB) {
vm_exit_during_initialization("-XX:+UsePerTenantTLAB only works with -XX:+TenantHeapIsolation and -XX:+UseTLAB");
}
}
}
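In effect, specifying -XX:+TenantHeapIsolation on a G1 heap ergonomically turns on MultiTenant and, when TLABs are in use, UsePerTenantTLAB; specifying -XX:+TenantHeapIsolation without -XX:+UseG1GC, or -XX:+UsePerTenantTLAB without TenantHeapIsolation and UseTLAB, aborts VM initialization as shown above.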
...@@ -29,6 +29,8 @@ ...@@ -29,6 +29,8 @@
#include "runtime/arguments.hpp" #include "runtime/arguments.hpp"
class ArgumentsExt: AllStatic { class ArgumentsExt: AllStatic {
private:
static void set_tenant_flags();
public: public:
static inline void set_gc_specific_flags(); static inline void set_gc_specific_flags();
static void process_options(const JavaVMInitArgs* args) {} static void process_options(const JavaVMInitArgs* args) {}
...@@ -36,6 +38,8 @@ public: ...@@ -36,6 +38,8 @@ public:
void ArgumentsExt::set_gc_specific_flags() { void ArgumentsExt::set_gc_specific_flags() {
Arguments::set_gc_specific_flags(); Arguments::set_gc_specific_flags();
set_tenant_flags();
} }
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP #endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
...@@ -95,7 +95,13 @@ ...@@ -95,7 +95,13 @@
"commit/uncommit. 0 be same as ConcGCThreads") \ "commit/uncommit. 0 be same as ConcGCThreads") \
\ \
product(bool, MultiTenant, false, \ product(bool, MultiTenant, false, \
"Enable the multi-tenant feature.") \ "Enable the multi-tenant feature.") \
\
product(bool, TenantHeapIsolation, false, \
"Isolates heap memory used by different TenantContainers") \
\
product(bool, UsePerTenantTLAB, false, \
"Mutator may maintain multiple TLABs for each of the tenants") \
//add new AJVM specific flags here //add new AJVM specific flags here
......
...@@ -307,6 +307,10 @@ Thread::Thread() { ...@@ -307,6 +307,10 @@ Thread::Thread() {
"bug in forced alignment of thread objects"); "bug in forced alignment of thread objects");
} }
#endif /* ASSERT */ #endif /* ASSERT */
if (UseG1GC) {
_alloc_context = AllocationContext::system();
}
} }
void Thread::initialize_thread_local_storage() { void Thread::initialize_thread_local_storage() {
...@@ -1921,7 +1925,11 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) { ...@@ -1921,7 +1925,11 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
remove_stack_guard_pages(); remove_stack_guard_pages();
if (UseTLAB) { if (UseTLAB) {
tlab().make_parsable(true); // retire TLAB if (UsePerTenantTLAB) {
make_all_tlabs_parsable(true, true);
} else {
tlab().make_parsable(true); // retire TLAB
}
} }
if (JvmtiEnv::environments_might_exist()) { if (JvmtiEnv::environments_might_exist()) {
...@@ -1947,6 +1955,148 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) { ...@@ -1947,6 +1955,148 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
Threads::remove(this); Threads::remove(this);
} }
// NOTE: active TLABs are only retired & deleted at a safepoint or on thread exit.
// It is OK to destroy a G1TenantAllocationContext while its previously
// used TLABs are still linked into mutator threads: no further allocation requests
// can happen on such a stale TLAB, and its remaining free space cannot be
// used by any other threads or tenants either.
#if INCLUDE_ALL_GCS
void Thread::make_all_tlabs_parsable(bool retire, bool delete_saved) {
assert(UseG1GC && TenantHeapIsolation
&& UseTLAB && UsePerTenantTLAB, "pre-condition");
for (ThreadLocalAllocBuffer* tlab = &(this->tlab()); tlab != NULL;
tlab = tlab->next()) {
tlab->make_parsable(retire);
}
if (delete_saved) {
assert(retire, "should only delete after retire!");
ThreadLocalAllocBuffer* tlab = this->tlab().next();
while (tlab != NULL) {
ThreadLocalAllocBuffer* next = tlab->next();
delete tlab;
tlab = next;
}
this->tlab().set_next(NULL);
}
}
void Thread::clean_tlab_for(const G1TenantAllocationContext* context) {
assert(UseG1GC && TenantHeapIsolation
&& UseTLAB && UsePerTenantTLAB, "sanity");
assert(SafepointSynchronize::is_at_safepoint()
&& Thread::current()->is_VM_thread(), "pre-condition");
guarantee(context != G1TenantAllocationContexts::system_context(),
"never clean root tenant context");
if (this->is_Java_thread()) {
JavaThread* java_thread = (JavaThread*)this;
// make sure TLAB's tenant allocation context is same as Java thread's
guarantee(java_thread->tenant_allocation_context() == this->tlab().tenant_allocation_context(),
err_msg("Inconsistent tenant allocation context thread=" PTR_FORMAT ",context=" PTR_FORMAT
", but its TLAB's context=" PTR_FORMAT,
java_thread,
java_thread->tenant_allocation_context(),
this->tlab().tenant_allocation_context()));
}
// if the to-be-deleted context is the currently active context,
// we just switch completely to the ROOT tenant's TLAB
const G1TenantAllocationContext* context_to_search =
this->tlab().tenant_allocation_context() == context ? G1TenantAllocationContexts::system_context() : context;
for (ThreadLocalAllocBuffer* tlab = &(this->tlab()), *prev = NULL;
tlab != NULL;
prev = tlab, tlab = tlab->next())
{
if (tlab->tenant_allocation_context() == context_to_search) {
guarantee(prev != NULL, "Cannot be an in-use TLAB");
if (context_to_search == G1TenantAllocationContexts::system_context()) {
guarantee(this->tlab().tenant_allocation_context() == context,
"must be in-use TLAB");
guarantee(tlab != &(this->tlab()),
"Cannot be root context");
this->tlab().make_parsable(true);
if (this->is_Java_thread()) {
// set_tenantObj will do search and swap, without changing the list structure
((JavaThread*)this)->set_tenantObj(NULL);
} else {
this->tlab().swap_content(tlab);
}
} else {
guarantee(this->tlab().tenant_allocation_context() != context,
"cannot be in-use TLAB");
tlab->make_parsable(true);
}
// remove the 'dead' TLAB from list
ThreadLocalAllocBuffer* next_tlab = tlab->next();
prev->set_next(next_tlab);
delete tlab;
return;
}
}
}
const AllocationContext_t& Thread::allocation_context() const {
assert(UseG1GC, "Only G1 policy supported");
return _alloc_context;
}
void Thread::set_allocation_context(AllocationContext_t context) {
assert(UseG1GC, "Only G1 policy supported");
assert(Thread::current() == this
|| (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()
&& VMThread::vm_operation() != NULL
&& VMThread::vm_operation()->type() == VM_Operation::VMOp_DestroyG1TenantAllocationContext),
"Only allowed to be set by self thread or tenant destruction vm_op");
_alloc_context = context;
if (UseG1GC && TenantHeapIsolation
&& UseTLAB
&& this->is_Java_thread()) { // only for Java thread, though _tlab is in Thread
if (UsePerTenantTLAB) {
G1TenantAllocationContext* tac = context.tenant_allocation_context();
ThreadLocalAllocBuffer* tlab = &(this->tlab());
assert(tlab != NULL, "Attach to same tenant twice!");
if (tlab->tenant_allocation_context() == tac) {
// no need to switch TLAB
assert(tac == G1TenantAllocationContexts::system_context(),
"Must be ROOT allocation context");
return;
}
// traverse the saved TLAB list to look for a TLAB previously used for 'tac'
do {
tlab = tlab->next();
if (tlab != NULL && tlab->tenant_allocation_context() == tac) {
this->tlab().swap_content(tlab);
break;
}
} while (tlab != NULL);
// no saved TLAB found; this is the first time the current thread allocates in 'tac'
if (tlab == NULL) {
ThreadLocalAllocBuffer* new_tlab = new ThreadLocalAllocBuffer();
new_tlab->initialize();
// link to list
new_tlab->set_next(this->tlab().next());
new_tlab->set_tenant_allocation_context(tac);
this->tlab().set_next(new_tlab);
// make the new TLAB active
this->tlab().swap_content(new_tlab);
}
} else {
tlab().make_parsable(true /* retire */);
}
}
}
#endif // INCLUDE_ALL_GCS
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
// Flush G1-related queues. // Flush G1-related queues.
void JavaThread::flush_barrier_queues() { void JavaThread::flush_barrier_queues() {
...@@ -1954,6 +2104,64 @@ void JavaThread::flush_barrier_queues() { ...@@ -1954,6 +2104,64 @@ void JavaThread::flush_barrier_queues() {
dirty_card_queue().flush(); dirty_card_queue().flush();
} }
G1TenantAllocationContext* JavaThread::tenant_allocation_context() {
assert(TenantHeapIsolation, "pre-condition");
oop tenant_obj = tenantObj();
return (tenant_obj == NULL ? NULL : com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(tenant_obj));
}
void JavaThread::set_tenant_allocation_context(G1TenantAllocationContext* context) {
assert(TenantHeapIsolation, "pre-condition");
set_tenantObj(context == NULL ? (oop)NULL : context->tenant_container());
}
void JavaThread::set_tenantObj(oop tenantObj) {
assert(MultiTenant
// prevent assigning the same non-ROOT tenant twice;
// but allow repeated assignment of the ROOT tenant, to support
// TenantContainer.destroy() while live threads are still attached
&& (_tenantObj != tenantObj || tenantObj == NULL),
"pre-condition");
if (_tenantObj == tenantObj) {
return;
}
oop prev_tenant = _tenantObj;
_tenantObj = tenantObj;
#if INCLUDE_ALL_GCS
if (UseG1GC) {
set_allocation_context(AllocationContext_t(tenantObj == NULL ?
NULL : com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(tenantObj)));
#ifndef PRODUCT
if (TenantHeapIsolation && UseTLAB && UsePerTenantTLAB) {
G1TenantAllocationContext* prev_context = prev_tenant == NULL ? NULL /* root tenant */
: com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(prev_tenant);
//
// the thread was attached to a tenant container whose allocation context is
// the ROOT tenant's, which means the tenant container is DEAD.
//
// in the current implementation, inconsistency between a TenantContainer object
// and its G1TenantAllocationContext pointer is allowed:
// when a TenantContainer is destroyed before all attached threads are detached,
// the JVM simply switches the allocation contexts of the attached threads to the
// ROOT tenant, including the pointer recorded in the TenantContainer object.
//
if (prev_tenant != NULL && prev_context == G1TenantAllocationContexts::system_context()) {
assert(com_alibaba_tenant_TenantContainer::is_dead(prev_tenant),
"Must be dead TenantContainer");
}
}
#endif
}
#endif // #if INCLUDE_ALL_GCS
}
void JavaThread::initialize_queues() { void JavaThread::initialize_queues() {
assert(!SafepointSynchronize::is_at_safepoint(), assert(!SafepointSynchronize::is_at_safepoint(),
"we should not be at a safepoint"); "we should not be at a safepoint");
...@@ -2000,7 +2208,11 @@ void JavaThread::cleanup_failed_attach_current_thread() { ...@@ -2000,7 +2208,11 @@ void JavaThread::cleanup_failed_attach_current_thread() {
remove_stack_guard_pages(); remove_stack_guard_pages();
if (UseTLAB) { if (UseTLAB) {
tlab().make_parsable(true); // retire TLAB, if any if (UsePerTenantTLAB) {
this->make_all_tlabs_parsable(true, false);
} else {
tlab().make_parsable(true); // retire TLAB, if any
}
} }
#if INCLUDE_ALL_GCS #if INCLUDE_ALL_GCS
...@@ -4733,7 +4945,6 @@ void Thread::muxRelease (volatile intptr_t * Lock) { ...@@ -4733,7 +4945,6 @@ void Thread::muxRelease (volatile intptr_t * Lock) {
} }
} }
void Threads::verify() { void Threads::verify() {
ALL_JAVA_THREADS(p) { ALL_JAVA_THREADS(p) {
p->verify(); p->verify();
......
...@@ -81,6 +81,10 @@ class GCTaskQueue; ...@@ -81,6 +81,10 @@ class GCTaskQueue;
class ThreadClosure; class ThreadClosure;
class IdealGraphPrinter; class IdealGraphPrinter;
#if INCLUDE_ALL_GCS
class G1TenantAllocationContext;
#endif
class Metadata; class Metadata;
template <class T, MEMFLAGS F> class ChunkedList; template <class T, MEMFLAGS F> class ChunkedList;
typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer; typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
...@@ -457,6 +461,11 @@ class Thread: public ThreadShadow { ...@@ -457,6 +461,11 @@ class Thread: public ThreadShadow {
tlab().initialize(); tlab().initialize();
} }
} }
#if INCLUDE_ALL_GCS
void make_all_tlabs_parsable(bool retire, bool delete_saved);
// called during tenantContainer destruction
void clean_tlab_for(const G1TenantAllocationContext* context);
#endif // if INCLUDE_ALL_GCS
jlong allocated_bytes() { return _allocated_bytes; } jlong allocated_bytes() { return _allocated_bytes; }
void set_allocated_bytes(jlong value) { _allocated_bytes = value; } void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
...@@ -677,6 +686,13 @@ protected: ...@@ -677,6 +686,13 @@ protected:
static void muxAcquire (volatile intptr_t * Lock, const char * Name) ; static void muxAcquire (volatile intptr_t * Lock, const char * Name) ;
static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ; static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
static void muxRelease (volatile intptr_t * Lock) ; static void muxRelease (volatile intptr_t * Lock) ;
private:
AllocationContext_t _alloc_context; // context for Java allocation requests
// put it here because allocation may happen in VM thread
public:
const AllocationContext_t& allocation_context() const;
void set_allocation_context(AllocationContext_t context);
}; };
// Inline implementation of Thread::current() // Inline implementation of Thread::current()
...@@ -1051,7 +1067,13 @@ class JavaThread: public Thread { ...@@ -1051,7 +1067,13 @@ class JavaThread: public Thread {
// Get/set the tenant which the thread is attached to // Get/set the tenant which the thread is attached to
oop tenantObj() const { return _tenantObj; } oop tenantObj() const { return _tenantObj; }
void set_tenantObj(oop tenantObj) { _tenantObj = tenantObj; } void set_tenantObj(oop tenantObj);
#if INCLUDE_ALL_GCS
G1TenantAllocationContext* tenant_allocation_context();
void set_tenant_allocation_context(G1TenantAllocationContext* context);
#endif
ThreadPriority java_priority() const; // Read from threadObj() ThreadPriority java_priority() const; // Read from threadObj()
......
...@@ -49,6 +49,17 @@ ...@@ -49,6 +49,17 @@
inline jlong Thread::cooked_allocated_bytes() { inline jlong Thread::cooked_allocated_bytes() {
jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes); jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
if (UseTLAB) { if (UseTLAB) {
if (MultiTenant && UsePerTenantTLAB) {
// accumulate used_bytes from all TLABs
size_t used_bytes = 0;
for (ThreadLocalAllocBuffer* tlab = &_tlab;
tlab != NULL;
tlab = tlab->next()) {
used_bytes += tlab->used_bytes();
}
return allocated_bytes + used_bytes;
}
size_t used_bytes = tlab().used_bytes(); size_t used_bytes = tlab().used_bytes();
if ((ssize_t)used_bytes > 0) { if ((ssize_t)used_bytes > 0) {
// More-or-less valid tlab. The load_acquire above should ensure // More-or-less valid tlab. The load_acquire above should ensure
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_THREAD_EXT_HPP #define SHARE_VM_RUNTIME_THREAD_EXT_HPP
#include "memory/allocation.hpp" #include "memory/allocation.hpp"
#include "gc_implementation/g1/g1AllocationContext.hpp"
class ThreadExt VALUE_OBJ_CLASS_SPEC { class ThreadExt VALUE_OBJ_CLASS_SPEC {
public: public:
......
...@@ -98,6 +98,7 @@ ...@@ -98,6 +98,7 @@
template(RotateGCLog) \ template(RotateGCLog) \
template(WhiteBoxOperation) \ template(WhiteBoxOperation) \
template(ClassLoaderStatsOperation) \ template(ClassLoaderStatsOperation) \
template(DestroyG1TenantAllocationContext) \
class VM_Operation: public CHeapObj<mtInternal> { class VM_Operation: public CHeapObj<mtInternal> {
public: public:
......
...@@ -58,6 +58,9 @@ template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsig ...@@ -58,6 +58,9 @@ template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsig
len = 1 << log2_int(len); // round down to power of 2 len = 1 << log2_int(len); // round down to power of 2
assert(len >= _entry_size, ""); assert(len >= _entry_size, "");
_first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC); _first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
if (NULL != _memory_blocks) {
_memory_blocks->append(_first_free_entry);
}
_end_block = _first_free_entry + len; _end_block = _first_free_entry + len;
} }
entry = (BasicHashtableEntry<F>*)_first_free_entry; entry = (BasicHashtableEntry<F>*)_first_free_entry;
...@@ -337,6 +340,9 @@ template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end ...@@ -337,6 +340,9 @@ template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end
*top += len; *top += len;
} }
unsigned int HashMapUtil::hash(oop o) {
return (unsigned int)ObjectSynchronizer::FastHashCode(JavaThread::current(), o);
}
#ifndef PRODUCT #ifndef PRODUCT
...@@ -405,6 +411,7 @@ template class HashtableEntry<Symbol*, mtClass>; ...@@ -405,6 +411,7 @@ template class HashtableEntry<Symbol*, mtClass>;
template class HashtableEntry<oop, mtSymbol>; template class HashtableEntry<oop, mtSymbol>;
template class BasicHashtableEntry<mtSymbol>; template class BasicHashtableEntry<mtSymbol>;
template class BasicHashtableEntry<mtCode>; template class BasicHashtableEntry<mtCode>;
template class BasicHashtableEntry<mtTenant>;
template class BasicHashtable<mtClass>; template class BasicHashtable<mtClass>;
template class BasicHashtable<mtSymbol>; template class BasicHashtable<mtSymbol>;
template class BasicHashtable<mtCode>; template class BasicHashtable<mtCode>;
...@@ -413,3 +420,174 @@ template class Hashtable<Method*, mtInternal>; ...@@ -413,3 +420,174 @@ template class Hashtable<Method*, mtInternal>;
template class Hashtable<Method*, mtNone>; template class Hashtable<Method*, mtNone>;
template class BasicHashtable<mtNone>; template class BasicHashtable<mtNone>;
template class Hashtable<Symbol*, mtNone>; template class Hashtable<Symbol*, mtNone>;
template class BasicHashtable<mtTenant>;
template class BasicHashtable<mtCompiler>;
#ifndef PRODUCT
// Testcase for HashMap
// customized key type for testing
class TestKey VALUE_OBJ_CLASS_SPEC {
private:
int i;
public:
TestKey(int i_) : i(i_) { }
bool operator == (const TestKey& tc) { return i == tc.i; }
bool operator != (const TestKey& tc) { return i != tc.i; }
unsigned int hash_code() { return *(unsigned int*)this; }
};
class HashMapTest : public AllStatic {
public:
static void test_basic();
static void test_customized_keytype();
static void test_for_each();
static void test_map_Iterator();
static void test_clear();
};
void HashMapTest::test_basic() {
// integer as hash key
HashMap<int, int> hm(8);
hm.put(10, 10);
hm.put(20, 2);
assert(hm.contains(10), "");
assert(hm.contains(20), "");
assert(!hm.contains(30), "");
assert(!hm.contains(0), "");
assert(10 == hm.get(10)->value(), "");
assert(2 == hm.get(20)->value(), "");
// should overwrite
hm.put(10, 11);
assert(11 == hm.get(10)->value(), "");
hm.put(20, 3);
assert(3 == hm.get(20)->value(), "");
// remove test
hm.put(18, 3);
assert(3 == hm.remove(18)->value(), "");
assert(!hm.contains(18), "");
assert(NULL == hm.remove(18), "");
assert(11 == hm.remove(10)->value(), "");
assert(!hm.contains(10), "");
assert(NULL == hm.remove(10), "");
// pointer as hash key
HashMap<void*, int> map2(8);
void* p = &hm;
map2.put(p, 10);
assert(map2.contains(p), "");
assert(!map2.contains(NULL), "");
assert(10 == map2.get(p)->value(), "");
// test overwrite
map2.put(p, 20);
assert(20 == map2.get(p)->value(), "");
}
void HashMapTest::test_customized_keytype() {
HashMap<TestKey, int> map(8);
TestKey k1(1), k2(2);
assert(0 == map.number_of_entries(), "");
map.put(k1, 2);
assert(map.contains(k1), "");
assert(2 == map.get(k1)->value(), "");
map.put(k1, 3);
assert(3 == map.get(k1)->value(), "");
assert(1 == map.number_of_entries(), "");
map.put(k1, 1);
map.put(k2, 2);
assert(2 == map.number_of_entries(), "");
assert(2 == map.get(k2)->value(), "");
assert(1 == map.get(k1)->value(), "");
}
void HashMapTest::test_for_each() {
HashMap<int, int> hm(32);
for (int i = 0; i < 32; ++i) {
hm.put(i, i + 1);
assert((i + 1) == hm.number_of_entries(), "");
}
for (HashMapIterator<int, int> itr = hm.begin();
itr != hm.end(); ++itr) {
assert(hm.contains(itr->key()), "");
// bad to modify during iteration, but this is just a test
hm.put(itr->key(), 1 + itr->value());
}
assert(32 == hm.number_of_entries(), "");
for (int i = 0; i < 32; ++i) {
assert(hm.contains(i), "");
assert((i + 2) == hm.get(i)->value(), "");
}
}
void HashMapTest::test_map_Iterator() {
HashMap<AllocationContext_t, int> map(8);
AllocationContext_t ac((G1TenantAllocationContext*)&map);
assert(map.number_of_entries() == 0, "");
HashMap<AllocationContext_t, int>::Iterator bg = map.begin(), ed = map.end();
// test different operators
assert(bg == ed, "");
assert(!(bg != ed), "");
assert(bg._idx == ed._idx, "");
assert(bg._cur == ed._cur, "");
assert(bg._map == ed._map, "");
map.put(ac, 1);
assert(map.contains(ac), "");
assert(map.get(ac)->value() == 1, "");
assert(map.number_of_entries() == 1, "");
bg = map.begin();
assert(bg != ed, "");
assert(!(bg == ed), "");
assert(bg._idx != ed._idx, "");
assert(bg._cur != ed._cur, "");
assert(bg._map == ed._map, "");
HashMap<AllocationContext_t, int>::Entry& entry = *bg;
assert(ac == entry._key, "");
assert(1 == entry._value, "");
assert(ac == bg->_key, "");
assert(1 == bg->_value, "");
}
void HashMapTest::test_clear() {
HashMap<int, int> map(16);
for (int i = 0; i < 32; ++i) {
map.put(i, i + 1);
}
assert(map.number_of_entries() == 32, "");
map.clear();
assert(map.number_of_entries() == 0, "");
for (int i = 0; i < 32; ++i) {
map.put(i, i + 1);
}
assert(map.number_of_entries() == 32, "");
}
// Test case
void Test_HashMap() {
HashMapTest::test_basic();
HashMapTest::test_for_each();
HashMapTest::test_customized_keytype();
HashMapTest::test_map_Iterator();
HashMapTest::test_clear();
}
#endif // PRODUCT
...@@ -177,6 +177,8 @@ protected: ...@@ -177,6 +177,8 @@ protected:
int _lookup_length; int _lookup_length;
void verify_lookup_length(double load); void verify_lookup_length(double load);
#endif #endif
// to record allocated memory chunks, only used by DeallocatableHashtable
GrowableArray<char*>* _memory_blocks;
void initialize(int table_size, int entry_size, int number_of_entries); void initialize(int table_size, int entry_size, int number_of_entries);
...@@ -246,6 +248,43 @@ public: ...@@ -246,6 +248,43 @@ public:
void verify() PRODUCT_RETURN; void verify() PRODUCT_RETURN;
}; };
//
// A derived BasicHashtable with dynamic memory deallocation support
//
// BasicHashtable will permanently hold memory allocated via
// NEW_C_HEAP_ARRAY2 without releasing, because it is intended
// to be used for global data structures like SymbolTable.
// The implementation below deallocates its memory chunks in the destructor,
// so it can also be used as a transient data structure.
//
template <MEMFLAGS F> class DeallocatableHashtable : public BasicHashtable<F> {
public:
DeallocatableHashtable(int table_size, int entry_size)
: BasicHashtable<F>(table_size, entry_size)
{
// will be released in BasicHashtable<F>::release_memory()
GrowableArray<char*>*& mem_blocks = BasicHashtable<F>::_memory_blocks;
mem_blocks = new (ResourceObj::C_HEAP, F) GrowableArray<char*>(0x4 /* initial size */,
true /* on C heap */, F);
assert(NULL != mem_blocks, "pre-condition");
}
~DeallocatableHashtable() {
BasicHashtable<F>::free_buckets();
GrowableArray<char*>*& mem_blocks = BasicHashtable<F>::_memory_blocks;
assert(NULL != mem_blocks, "pre-condition");
for (GrowableArrayIterator<char*> itr = mem_blocks->begin();
itr != mem_blocks->end(); ++itr) {
FREE_C_HEAP_ARRAY(char*, *itr, F);
}
mem_blocks->clear();
delete mem_blocks;
mem_blocks = NULL;
}
};
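As a rough sketch of the intended lifecycle (the memory flag and sizes below are illustrative, not taken from this change), a DeallocatableHashtable releases its chunks as soon as it goes out of scope:

  {
    // transient table on the stack; 128 buckets, entries of the basic entry size
    DeallocatableHashtable<mtTenant> table(128, sizeof(BasicHashtableEntry<mtTenant>));
    // ... populate and query via the BasicHashtable<mtTenant> interface ...
  } // destructor frees the buckets and every chunk recorded in _memory_blocks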
template <class T, MEMFLAGS F> class Hashtable : public BasicHashtable<F> { template <class T, MEMFLAGS F> class Hashtable : public BasicHashtable<F> {
friend class VMStructs; friend class VMStructs;
...@@ -361,4 +400,289 @@ public: ...@@ -361,4 +400,289 @@ public:
} }
}; };
//======================================================================
// A hash map implementation based on hashtable
//======================================================================
// forward declaration
template <typename K, typename V, MEMFLAGS F> class HashMap;
template <typename K, typename V, MEMFLAGS F> class HashMapIterator;
// utility classes to extract hash_code from various types
class HashMapUtil : public AllStatic {
public:
// for integers, use its own value as hash
static unsigned int hash(long l) { return *(unsigned int*)&l; }
static unsigned int hash(int i) { return (unsigned int)i; }
static unsigned int hash(short s) { return (unsigned int)s; }
static unsigned int hash(char c) { return (unsigned int)c; }
static unsigned int hash(unsigned long sz) { return (unsigned int)(sz & 0xFFFFFFFF); }
// use the middle bits of address value
static unsigned int hash(void *p) {
#ifdef _LP64
uint64_t val = *(uint64_t*)&p;
return (unsigned int)((val & 0xFFFFFFFF) >> 3);
#else
uint64_t val = *(uint32_t*)&p;
return (unsigned int)((val & 0xFFFF) >> 2);
#endif // _LP64
}
static unsigned int hash(oop o);
static unsigned int hash(Handle h) { return hash(h()); }
// general contract for obtaining a hash code from any type not handled above:
// the type must define a non-static member method `unsigned int hash_code()`
// that returns a consistent unsigned int value to be used as the hash code.
template<typename T>
static unsigned int hash(T t) { return t.hash_code(); }
};
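The TestKey type in the hashtable.cpp self-test above follows this contract; as another illustrative sketch (the RegionKey name is hypothetical, not part of this change), a user-defined key only needs operator== and hash_code():

  class RegionKey VALUE_OBJ_CLASS_SPEC {
   private:
    uint _idx;
   public:
    RegionKey(uint idx) : _idx(idx) { }
    bool operator == (const RegionKey& other) { return _idx == other._idx; }
    bool operator != (const RegionKey& other) { return _idx != other._idx; }
    // must return the same value for equal keys
    unsigned int hash_code() { return (unsigned int)_idx; }
  };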
// entry type
template <typename K, typename V, MEMFLAGS F = mtInternal>
class HashMapEntry : public BasicHashtableEntry<F> {
friend class HashMap<K, V, F>;
#ifndef PRODUCT
friend class HashMapTest;
#endif // PRODUCT
private:
K _key;
V _value;
public:
HashMapEntry* next() {
return (HashMapEntry*)BasicHashtableEntry<F>::next();
}
void set_next(HashMapEntry* next) {
BasicHashtableEntry<F>::set_next((BasicHashtableEntry<F>*)next);
}
K key() { return _key; }
V value() { return _value; }
K* key_addr() { return &_key; }
V* value_addr() { return &_value; }
};
//
// hash map class implemented in C++ based on BasicHashtable
// - unordered
// - unique
// - not MT-safe
// - deallocatable
//
template <typename K, typename V, MEMFLAGS F = mtInternal>
class HashMap : public DeallocatableHashtable<F> {
friend class HashMapIterator<K, V, F>;
public:
typedef HashMapEntry<K, V, F> Entry;
typedef HashMapIterator<K, V, F> Iterator;
// alternative hashing function
typedef int (*AltHashFunc) (K);
private:
// a table_size of 1 would cause new_entry() to fail,
// so always allocate the table with at least this size (0x4)
static const int MIN_TABLE_SIZE = 0x4;
// alternative hashing function (optional)
AltHashFunc* _alt_hasher;
protected:
unsigned int compute_hash(K k) {
if (_alt_hasher != NULL) {
return (unsigned int)((*_alt_hasher)(k));
}
return HashMapUtil::hash(k);
}
Entry* bucket(int index) {
return (Entry*)BasicHashtable<F>::bucket(index);
}
Entry* get_entry(int index, unsigned int hash, K k) {
for (Entry* pp = bucket(index); pp != NULL; pp = pp->next()) {
if (pp->hash() == hash && pp->_key == k) {
return pp;
}
}
return NULL;
}
Entry* get_entry(K k) {
unsigned int hash = compute_hash(k);
return get_entry(BasicHashtable<F>::hash_to_index(hash), hash, k);
}
Entry* new_entry(K k, V v) {
unsigned int hash = compute_hash(k);
Entry* pp = (Entry*)BasicHashtable<F>::new_entry(hash);
pp->_key = k;
pp->_value = v;
return pp;
}
void add_entry(Entry* pp) {
int index = BasicHashtable<F>::hash_to_index(pp->hash());
BasicHashtable<F>::add_entry(index, pp);
}
public:
HashMap(int table_size)
: DeallocatableHashtable<F>((table_size < MIN_TABLE_SIZE ? MIN_TABLE_SIZE : table_size),
sizeof(Entry)),
_alt_hasher(NULL)
{ }
// Associates the specified value with the specified key in this map.
// If the map previously contained a mapping for the key, the old value is replaced.
void put(K k, V v) {
Entry* e = get_entry(k);
if (NULL != e) {
e->_value = v;
} else {
Entry* e = new_entry(k, v);
assert(NULL != e, "cannot create new entry");
add_entry(e);
}
}
Entry* remove(K k) {
int index = this->hash_to_index(compute_hash(k));
Entry *e = bucket(index);
Entry *prev = NULL;
for (; e != NULL ; prev = e, e = e->next()) {
if (e->_key == k) {
if (prev != NULL) {
prev->set_next(e->next());
} else {
this->set_entry(index, e->next());
}
this->free_entry(e);
return e;
}
}
return NULL;
}
// Returns true if this map contains a mapping for the specified key
bool contains(K k) {
return NULL != get_entry(k);
}
// Returns the entry to which the specified key is mapped,
// or null if this map contains no mapping for the key.
Entry* get(K k) {
return get_entry(k);
}
// Removes all of the mappings from this map. The map will be empty after this call returns.
void clear() {
// return all entries to the free list
for (int idx = 0; idx < BasicHashtable<F>::table_size(); ++idx) {
for (Entry* entry = bucket(idx); NULL != entry;) {
Entry* next = entry->next();
this->free_entry(entry);
entry = next;
}
BasicHashtable<F>::set_entry(idx, NULL);
}
}
Iterator begin() {
return Iterator(this);
}
Iterator end() {
Iterator itr(this);
itr._cur = NULL;
itr._idx = BasicHashtable<F>::table_size();
return itr;
}
// set an alternative hashing function
void set_alt_hasher(AltHashFunc* hash_func) {
_alt_hasher = hash_func;
}
};
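A minimal usage sketch of the map API (the key/value choices here are arbitrary; HashMapTest in hashtable.cpp exercises the same calls):

  HashMap<int, int> counters(16);
  counters.put(1, 100);
  counters.put(1, 101);                 // overwrites the existing mapping for key 1
  if (counters.contains(1)) {
    int v = counters.get(1)->value();   // 101
  }
  counters.remove(1);                   // unlinks the entry and returns it (or NULL)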
// External iteration support
template <typename K, typename V, MEMFLAGS F = mtInternal>
class HashMapIterator VALUE_OBJ_CLASS_SPEC {
friend class HashMap<K, V, F>;
#ifndef PRODUCT
friend class HashMapTest;
#endif // PRODUCT
private:
typedef typename HashMap<K, V, F>::Entry Entry;
Entry* _cur;
HashMap<K, V, F>* _map;
int _idx;
public:
HashMapIterator(HashMap<K, V, F>* map)
: _map(map), _cur(NULL), _idx(0) {
for (;_idx < _map->table_size(); ++_idx) {
_cur = _map->bucket(_idx);
if (NULL != _cur) {
break;
}
}
}
HashMapIterator(const HashMapIterator<K, V, F>& other)
: _map(other._map), _cur(other._cur), _idx(other._idx)
{ }
HashMapIterator<K, V, F>& operator++() {
if (NULL != _cur) {
if (NULL != _cur->next()) {
_cur = _cur->next();
} else {
do {
++_idx;
} while (_idx < _map->table_size()
&& NULL == _map->bucket(_idx));
assert(_idx <= _map->table_size(), "pre-condition");
if (_idx == _map->table_size()) {
// end of iteration
_cur = NULL;
} else {
// move to next bucket
_cur = _map->bucket(_idx);
}
}
}
return *this;
}
HashMapIterator<K, V, F>& operator = (const HashMapIterator<K, V, F>& other) {
if (&other != this) {
_map = other._map;
_cur = other._cur;
_idx = other._idx;
}
return *this;
}
Entry& operator*() { return *_cur; }
Entry* operator->() { return _cur; }
bool operator == (const HashMapIterator<K, V, F>& other) const {
return (_map == other._map && _cur == other._cur && _idx == other._idx);
}
bool operator != (const HashMapIterator<K, V, F>& other) const {
return (_map != other._map || _cur != other._cur || _idx != other._idx);
}
};
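Iteration follows the usual begin()/end() pattern; a brief sketch (continuing the hypothetical `counters` map from the sketch above):

  for (HashMap<int, int>::Iterator it = counters.begin(); it != counters.end(); ++it) {
    // operator-> exposes the current HashMapEntry
    tty->print_cr("key=%d value=%d", it->key(), it->value());
  }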
#endif // SHARE_VM_UTILITIES_HASHTABLE_HPP #endif // SHARE_VM_UTILITIES_HASHTABLE_HPP
...@@ -64,6 +64,7 @@ template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size, ...@@ -64,6 +64,7 @@ template <MEMFLAGS F> inline void BasicHashtable<F>::initialize(int table_size,
_first_free_entry = NULL; _first_free_entry = NULL;
_end_block = NULL; _end_block = NULL;
_number_of_entries = number_of_entries; _number_of_entries = number_of_entries;
_memory_blocks = NULL;
#ifdef ASSERT #ifdef ASSERT
_lookup_count = 0; _lookup_count = 0;
_lookup_length = 0; _lookup_length = 0;
......
/*
* Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Alibaba designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
/*
* @test
* @summary Test TenantContainer.containerOf() to retrieve the tenant container of a Java object
* @library /testlibrary
* @build TestContainerOf
* @run main/othervm -XX:+MultiTenant -XX:+TenantHeapIsolation -XX:+UseG1GC -Xmx1024M -Xms512M
* -XX:G1HeapRegionSize=1M TestContainerOf
*/
import static com.oracle.java.testlibrary.Asserts.*;
import com.alibaba.tenant.TenantConfiguration;
import com.alibaba.tenant.TenantContainer;
import com.alibaba.tenant.TenantException;
import java.util.Arrays;
public class TestContainerOf {
public static void main(String[] args) throws Exception {
new TestContainerOf().runAllTests();
}
void runAllTests() throws Exception {
testTenantContainerOf(1);
testTenantContainerOf(10);
testTenantContainerOf(80);
testRunInRootTenant();
}
// test TenantContainer.containerOf()
private void testTenantContainerOf(int count) throws Exception {
System.out.println(">> Begin TEST testTenantContainerOf: count=" + count);
Object[] objects = new Object[count];
TenantContainer[] tenants = new TenantContainer[count];
Object objInRoot = new Object();
TenantConfiguration config = new TenantConfiguration().limitHeap(32 * 1024 * 1024 /* 32 MB heap */);
assertNull(TenantContainer.containerOf(objInRoot));
for (int i = 0; i < count; ++i) {
tenants[i]= TenantContainer.create(config);
final int idx = i;
final TenantContainer thisContainer = tenants[i];
thisContainer.run(() -> {
objects[idx] = new Object();
TenantContainer current = TenantContainer.current();
assertNotNull(current);
assertTrue(current == thisContainer);
assertNotNull(TenantContainer.containerOf(objects[idx]));
});
}
for (int i = 0; i < count; ++i) {
TenantContainer containerGet = TenantContainer.containerOf(objects[i]);
assertNotNull(containerGet);
long idGet = containerGet.getTenantId();
long idCur = tenants[i].getTenantId();
assertEquals(idGet, idCur);
// assertTrue(containerGet.getTenantId() == tenants[i].getTenantId());
assertTrue(tenants[i] == containerGet);
}
Arrays.stream(tenants).forEach(t -> t.destroy());
Arrays.stream(objects).forEach(
obj -> assertNull(TenantContainer.containerOf(obj), "Should be owned by ROOT tenant"));
System.out.println("<<End TEST testTenantContainerOf\n");
}
public void testRunInRootTenant() throws TenantException {
TenantConfiguration tconfig = new TenantConfiguration().limitHeap(100 * 1024 * 1024);
final TenantContainer tenant = TenantContainer.create(tconfig);
tenant.run(() -> {
assertTrue(TenantContainer.current() == tenant);
Object obj = TenantContainer.primitiveRunInRoot(()->{
//should be in root tenant.
assertTrue(TenantContainer.current() == null);
return new Object();
});
//obj should be allocated in root tenant.
assertTrue(TenantContainer.containerOf(obj) == null);
});
}
}
This diff is collapsed.
#!/usr/bin/env bash
#
# Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Alibaba designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# @test TestMultiTenantOptionDeps
# @summary Test the dependencies of multi-tenant options
# @run shell TestMultiTenantOptionDeps.sh
#
if [ "${TESTSRC}" = "" ]
then
TESTSRC=${PWD}
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi
echo "TESTSRC=${TESTSRC}"
## Adding common setup Variables for running shell tests.
. ${TESTSRC}/../test_env.sh
JAVA=${TESTJAVA}${FS}bin${FS}java
set -x
# if $FROM is enabled, $TO should be enabled automatically
function check_dependency_bool_bool() {
FROM=$1
TO="$(echo $2 | sed 's/-XX:+//g')"
if [ -z "$(${JAVA} ${FROM} -XX:+PrintFlagsFinal -version 2>&1 | grep ${TO} | grep '= true')" ]; then
echo "check_dependency_bool_bool failed: $1 --> $2"
exit 1
fi
}
function check_dependency_bool_bool_false() {
FROM=$1
TO="$(echo $2 | sed 's/-XX:+//g')"
if [ -z "$(${JAVA} ${FROM} -XX:+PrintFlagsFinal -version 2>&1 | grep ${TO} | grep '= false')" ]; then
echo "check_dependency_bool_bool failed: $1 --> $2"
exit 1
fi
}
# check that the provided JVM arguments are rejected as invalid
function assert_invalid_jvm_options() {
JVM_ARGS=$1
CMD="${JAVA} ${JVM_ARGS} -version"
OUT=$(${CMD} 2>&1)
if [ 0 -eq $? ]; then
echo "Expected invalid JVM arguments: ${JVM_ARGS}"
exit 1
fi
}
check_dependency_bool_bool '-XX:+UseG1GC -XX:+TenantHeapIsolation' '-XX:+MultiTenant'
assert_invalid_jvm_options '-XX:+TenantHeapIsolation'
assert_invalid_jvm_options '-XX:+TenantHeapIsolation -XX:+UseConcMarkSweepGC'
assert_invalid_jvm_options '-XX:+UseG1GC -XX:+TenantHeapIsolation -XX:-MultiTenant'
#!/usr/bin/env bash
#
# Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation. Alibaba designates this
# particular file as subject to the "Classpath" exception as provided
# by Oracle in the LICENSE file that accompanied this code.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
# @test TestParGCAllocatorLeak.sh
# @summary test for memory leak in G1ParGCAllocator
# @run shell TestParGCAllocatorLeak.sh
#
if [ "${TESTSRC}" = "" ]
then
TESTSRC=${PWD}
echo "TESTSRC not set. Using "${TESTSRC}" as default"
fi
echo "TESTSRC=${TESTSRC}"
# Adding common setup Variables for running shell tests.
. ${TESTSRC}/../test_env.sh
JAVA=${TESTJAVA}${FS}bin${FS}java
JAVAC=${TESTJAVA}${FS}bin${FS}javac
JCMD=${TESTJAVA}${FS}bin${FS}jcmd
TEST_CLASS=TestLeak
TEST_SRC=$TEST_CLASS.java
###################################################################################
cat > $TEST_SRC << EOF
import com.alibaba.tenant.*;
import java.util.*;
class $TEST_CLASS {
public static void main(String[] args) {
int nofCpus = Runtime.getRuntime().availableProcessors();
Thread[] threads = new Thread[nofCpus];
for (int i = 0; i < nofCpus; ++i) {
threads[i] = new Thread(()->{
TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(32 * 1024 * 1024));
try {
tenant.run(()->{
while (true) {
Object o = new byte[1024];
}
});
} catch (Exception e) {
throw new RuntimeException(e);
}
});
threads[i].start();
}
Arrays.stream(threads).forEach(t->{
try {
t.join();
} catch (Exception e) {
throw new RuntimeException(e);
}
});
}
}
EOF
# Do compilation
${JAVAC} ${TEST_SRC}
if [ $? != '0' ]
then
echo "Failed to compile ${TEST_SRC}"
exit 1
fi
set -x
${JAVA} -cp . -XX:+UseG1GC -XX:+MultiTenant -XX:+TenantHeapIsolation -XX:NativeMemoryTracking=detail -XX:+PrintGCDetails -Xloggc:gc.log -Xmx1g -Xmn32m ${TEST_CLASS} > ${TEST_CLASS}.log 2>&1 &
sleep 5
PID=$(ps ax | grep ${TEST_CLASS} | grep -v grep | awk '{print $1}')
if [ -z "$PID" ] && [ "$(echo $PID | wc -w)" -gt 1 ] ; then
echo "BAD pid!"
exit 1
fi
# set up baseline
$JCMD $PID VM.native_memory baseline
# sleep 30s
sleep 30
# check differences of below sections
NMT_SECTIONS=("Internal" "Tenant")
for MEM_SEC in ${NMT_SECTIONS[*]}; do
DIFF=$($JCMD $PID VM.native_memory summary.diff | grep -A3 ${MEM_SEC} | grep malloc | grep -v grep)
if [ ! -z "$(echo $DIFF | grep +)" ] && [ -z "$(echo $DIFF | awk '{print $2}' | grep \#)" ]; then
DIFF=$(echo $DIFF | awk '{print $2}')
echo "DIFF=$DIFF"
if [ ! -z "$(echo $DIFF | grep KB)" ]; then
# only check result if diff >= 1kb
DIFF_V="$(echo $DIFF | sed -e 's/KB//g' -e 's/+//g' -e 's/-//g')"
if [ -z "$DIFF_V" ]; then
echo "Bad diff value $DIFF_V"
kill -9 $PID
exit 1
fi
if [ $DIFF_V -gt 1024 ]; then
echo "Diff value is great than 1 MB, maybe leaking!!"
kill -9 $PID
exit 1
fi
fi
# sanity check: the diff should never reach MB or GB scale
if [ ! -z "$(echo $DIFF | grep MB)" ] || [ ! -z "$(echo $DIFF | grep GB)" ]; then
echo "Cannot in MB or GB scale mode"
kill -9 $PID
exit 1
fi
else
echo "No significant memory size changed, skipping"
fi
done
kill -9 $PID
/*
* Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Alibaba designates this
* particular file as subject to the "Classpath" exception as provided
* by Oracle in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
*/
/*
* @test
* @summary Test retain and reuse of TLAB
* @library /testlibrary /testlibrary/whitebox
* @build TestPerTenantTLAB
* @run main ClassFileInstaller sun.hotspot.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+UsePerTenantTLAB -XX:+TenantHeapIsolation -XX:+UseG1GC -XX:+UseTLAB -XX:TLABSize=65535 -Xmx1024M -Xms512M -XX:G1HeapRegionSize=1M TestPerTenantTLAB
*
*/
import static com.oracle.java.testlibrary.Asserts.*;
import com.alibaba.tenant.TenantConfiguration;
import com.alibaba.tenant.TenantContainer;
import com.alibaba.tenant.TenantException;
import com.alibaba.tenant.TenantState;
import sun.hotspot.WhiteBox;
import java.util.concurrent.CountDownLatch;
public class TestPerTenantTLAB {
private static final WhiteBox WB = WhiteBox.getWhiteBox();
private static final int G1_HEAP_REGION_SIZE = WB.g1RegionSize();
private static final int G1_HEAP_REGION_MASK = (0xFFFFFFFF << Integer.numberOfTrailingZeros(G1_HEAP_REGION_SIZE));
// non-adaptive TLAB size, see -XX:TLABSize from command line options
private static final int TLAB_SIZE = 65535;
private void testRetainReuseTLABBasic() {
TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(64 * 1024 * 1024));
Object[] refs = new Object[16];
WB.fullGC();
// after full GC, the allocation requests below will start from (almost) the beginning of fresh EDEN regions.
try {
refs[0] = new Object();
refs[1] = new Object();
assertInCurrentTLAB(refs[0], refs[1]);
tenant.run(()->{
refs[2] = new Object();
refs[3] = new Object();
assertInCurrentTLAB(refs[2], refs[3]);
assertNotInCurrentTLAB(refs[0], refs[1]);
});
assertNotInCurrentTLAB(refs[2], refs[3]);
assertInCurrentTLAB(refs[0], refs[1]);
assertNotInSameRegion(refs[1], refs[2]);
refs[4] = new Object();
refs[5] = new Object();
assertInCurrentTLAB(refs[4], refs[5]);
assertNotInSameRegion(refs[3], refs[4]);
assertInCurrentTLAB(refs[0], refs[1], refs[4], refs[5]);
tenant.run(()->{
refs[6] = new Object();
refs[7] = new Object();
assertInCurrentTLAB(refs[2], refs[3], refs[6], refs[7]);
assertNotInSameRegion(refs[4], refs[7]);
assertNotInSameRegion(refs[5], refs[6]);
});
refs[8] = new Object();
refs[9] = new Object();
assertInCurrentTLAB(refs[0], refs[1], refs[4], refs[5], refs[8], refs[9]);
} catch (TenantException e) {
throw new RuntimeException(e);
} finally {
tenant.destroy();
}
}
private void testChildThread() {
TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(64 * 1024 * 1024));
Object[] refs = new Object[16];
WB.fullGC();
// after full GC, the allocation requests below will start from (almost) the beginning of fresh EDEN regions.
try {
tenant.run(()-> {
refs[0] = new Object();
refs[1] = new Object();
assertInCurrentTLAB(refs[0], refs[1]);
Thread t = new Thread(()->{
refs[2] = new Object();
refs[3] = new Object();
assertInCurrentTLAB(refs[2], refs[3]);
assertNotInCurrentTLAB(refs[0], refs[1]);
TenantContainer.primitiveRunInRoot(()-> {
refs[4] = new Object();
refs[5] = new Object();
assertInCurrentTLAB(refs[4], refs[5]);
assertNotInSameRegion(refs[2], refs[5]);
});
});
// wait for the child thread to finish
t.start();
try {
t.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
// newly allocated ROOT objects should now be in the same TLAB as the previous ROOT objects
TenantContainer.primitiveRunInRoot(()->{
refs[6] = new Object();
refs[7] = new Object();
assertInCurrentTLAB(refs[6], refs[7]);
assertInSameRegion(refs[4], refs[6]);
assertInSameRegion(refs[4], refs[7]);
});
});
} catch (TenantException e) {
throw new RuntimeException(e);
}
refs[8] = new Object();
refs[9] = new Object();
assertInCurrentTLAB(refs[8], refs[9], refs[6], refs[7]);
Thread t = new Thread(()->{
refs[10] = new Object();
refs[11] = new Object();
assertInCurrentTLAB(refs[10], refs[11]);
assertNotInCurrentTLAB(refs[8], refs[9]);
assertInSameRegion(refs[8], refs[10]);
assertInSameRegion(refs[4], refs[11]);
assertInSameRegion(refs[5], refs[11]);
assertInSameRegion(refs[6], refs[11]);
});
t.start();
try {
t.join();
} catch (InterruptedException e) {
throw new RuntimeException(e);
} finally {
tenant.destroy();
}
}
private void testAfterDestroyTenant() {
TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(64 * 1024 * 1024));
Object[] refs = new Object[2];
CountDownLatch cdl = new CountDownLatch(1);
CountDownLatch started = new CountDownLatch(1);
assertInCurrentTLAB(refs, cdl);
System.gc();
Thread thread = new Thread(()->{
try {
tenant.run(()->{
refs[0] = new Object();
assertTrue(TenantContainer.containerOf(refs[0]) == tenant);
assertTrue(WB.isInCurrentTLAB(refs[0]));
started.countDown();
// attach and hold
try {
cdl.await();
} catch (InterruptedException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
});
} catch (TenantException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
});
thread.start();
try {
started.await();
assertTrue(TenantContainer.containerOf(refs[0]) == tenant);
tenant.destroy();
assertTrue(tenant.getState() == TenantState.DEAD);
// should have been moved to root
assertNull(TenantContainer.containerOf(refs[0]));
// release the task blocked inside the tenant so it can finish
cdl.countDown();
thread.join();
} catch (InterruptedException e) {
e.printStackTrace();
throw new RuntimeException(e);
}
}
public static void main(String[] args) {
TestPerTenantTLAB test = new TestPerTenantTLAB();
test.testRetainReuseTLABBasic();
test.testChildThread();
test.testAfterDestroyTenant();
}
private static void assertInCurrentTLAB(Object...objs) {
for (Object o : objs) {
assertTrue(WB.isInCurrentTLAB(o));
}
}
private static void assertNotInSameTLAB(Object o1, Object o2) {
assertGreaterThanOrEqual((int)Math.abs(WB.getObjectAddress(o1) - WB.getObjectAddress(o2)), TLAB_SIZE);
}
private static void assertNotInCurrentTLAB(Object... objs) {
for (Object o : objs) {
assertFalse(WB.isInCurrentTLAB(o));
}
}
private static void assertNotInSameRegion(Object o1, Object o2) {
int addr1 = (int)WB.getObjectAddress(o1) & G1_HEAP_REGION_MASK;
int addr2 = (int)WB.getObjectAddress(o2) & G1_HEAP_REGION_MASK;
assertNotEquals(addr1, addr2);
}
private static void assertInSameRegion(Object o1, Object o2) {
int addr1 = (int)WB.getObjectAddress(o1) & G1_HEAP_REGION_MASK;
int addr2 = (int)WB.getObjectAddress(o2) & G1_HEAP_REGION_MASK;
assertEquals(addr1, addr2);
}
}
...@@ -267,4 +267,7 @@ public class WhiteBox { ...@@ -267,4 +267,7 @@ public class WhiteBox {
public native boolean isContainerized(); public native boolean isContainerized();
public native void printOsInfo(); public native void printOsInfo();
// TLAB
public native boolean isInCurrentTLAB(Object obj);
} }