diff --git a/make/linux/makefiles/mapfile-vers-debug b/make/linux/makefiles/mapfile-vers-debug
index 12ba424d8827764b3e30da9efcf8cea8a2f59f9e..9b73771152f0448109e78fdaa13ae633e4017b70 100644
--- a/make/linux/makefiles/mapfile-vers-debug
+++ b/make/linux/makefiles/mapfile-vers-debug
@@ -285,7 +285,12 @@ SUNWprivate_1.1 {
# INSERT VTABLE SYMBOLS HERE
+ JVM_TenantContainerOf;
JVM_AttachToTenant;
+ JVM_CreateTenantAllocationContext;
+ JVM_DestroyTenantAllocationContext;
+ JVM_GetTenantOccupiedMemory;
+
local:
*;
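
The newly exported symbols need matching extern "C" entry points in the VM. A minimal sketch of what the declarations might look like; the exact prototypes live in jvm.h and are assumptions here, not verbatim from this patch:

    // Hypothetical prototypes for the exported tenant symbols.
    extern "C" {
      jobject JNICALL JVM_TenantContainerOf(JNIEnv* env, jclass cls, jobject obj);
      void    JNICALL JVM_CreateTenantAllocationContext(JNIEnv* env, jobject tenant, jlong heap_limit);
      void    JNICALL JVM_DestroyTenantAllocationContext(JNIEnv* env, jlong context);
      jlong   JNICALL JVM_GetTenantOccupiedMemory(JNIEnv* env, jlong context);
    }
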
diff --git a/make/linux/makefiles/mapfile-vers-product b/make/linux/makefiles/mapfile-vers-product
index 73eb0de98f838835d2ec12f63a7c98702ef368a3..bb0d34765613a86a90f7ee51621207862f363d63 100644
--- a/make/linux/makefiles/mapfile-vers-product
+++ b/make/linux/makefiles/mapfile-vers-product
@@ -280,7 +280,11 @@ SUNWprivate_1.1 {
# INSERT VTABLE SYMBOLS HERE
+ JVM_TenantContainerOf;
JVM_AttachToTenant;
+ JVM_CreateTenantAllocationContext;
+ JVM_DestroyTenantAllocationContext;
+ JVM_GetTenantOccupiedMemory;
local:
*;
diff --git a/src/share/vm/classfile/javaClasses.cpp b/src/share/vm/classfile/javaClasses.cpp
index feb86a60adb08d3f8a49a91ffceabcd828b79074..19ea069261222577aedc0f443e3ad24b6df41817 100644
--- a/src/share/vm/classfile/javaClasses.cpp
+++ b/src/share/vm/classfile/javaClasses.cpp
@@ -52,6 +52,10 @@
#include "runtime/vframe.hpp"
#include "utilities/preserveException.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
+#endif // INCLUDE_ALL_GCS
+
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
#define INJECTED_FIELD_COMPUTE_OFFSET(klass, name, signature, may_be_java) \
@@ -3247,6 +3251,13 @@ int java_lang_AssertionStatusDirectives::packages_offset;
int java_lang_AssertionStatusDirectives::packageEnabled_offset;
int java_lang_AssertionStatusDirectives::deflt_offset;
int java_nio_Buffer::_limit_offset;
+#if INCLUDE_ALL_GCS
+int com_alibaba_tenant_TenantContainer::_allocation_context_offset;
+int com_alibaba_tenant_TenantContainer::_tenant_id_offset;
+int com_alibaba_tenant_TenantContainer::_tenant_state_offset;
+
+int com_alibaba_tenant_TenantState::_static_state_offsets[com_alibaba_tenant_TenantState::TS_SIZE] = { 0 };
+#endif // INCLUDE_ALL_GCS
int java_util_concurrent_locks_AbstractOwnableSynchronizer::_owner_offset = 0;
int sun_reflect_ConstantPool::_oop_offset;
int sun_reflect_UnsafeStaticFieldAccessorImpl::_base_offset;
@@ -3306,6 +3317,73 @@ void java_nio_Buffer::compute_offsets() {
compute_offset(_limit_offset, k, vmSymbols::limit_name(), vmSymbols::int_signature());
}
+#if INCLUDE_ALL_GCS
+
+// Support for com.alibaba.tenant.TenantContainer
+
+void com_alibaba_tenant_TenantContainer::compute_offsets() {
+ Klass* k = SystemDictionary::com_alibaba_tenant_TenantContainer_klass();
+ assert(k != NULL, "Cannot find TenantContainer in current JDK");
+ compute_offset(_tenant_id_offset, k, vmSymbols::tenant_id_address(), vmSymbols::long_signature());
+ compute_offset(_allocation_context_offset, k, vmSymbols::allocation_context_address(), vmSymbols::long_signature());
+ compute_offset(_tenant_state_offset, k, vmSymbols::state_name(), vmSymbols::com_alibaba_tenant_TenantState_signature());
+}
+
+jlong com_alibaba_tenant_TenantContainer::get_tenant_id(oop obj) {
+ assert(obj != NULL, "TenantContainer object cannot be NULL");
+ return obj->long_field(_tenant_id_offset);
+}
+
+G1TenantAllocationContext* com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(oop obj) {
+ assert(obj != NULL, "TenantContainer object cannot be NULL");
+ return (G1TenantAllocationContext*)(obj->long_field(_allocation_context_offset));
+}
+
+void com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(oop obj, G1TenantAllocationContext* context) {
+ assert(obj != NULL, "TenantContainer object cannot be NULL");
+ obj->long_field_put(_allocation_context_offset, (jlong)context);
+}
+
+bool com_alibaba_tenant_TenantContainer::is_dead(oop obj) {
+ assert(obj != NULL, "TenantContainer object cannot be NULL");
+ int state = com_alibaba_tenant_TenantState::state_of(obj);
+ return state == com_alibaba_tenant_TenantState::TS_STOPPING
+ || state == com_alibaba_tenant_TenantState::TS_DEAD;
+}
+
+oop com_alibaba_tenant_TenantContainer::get_tenant_state(oop obj) {
+ assert(obj != NULL, "TenantContainer object cannot be NULL");
+ return obj->obj_field(_tenant_state_offset);
+}
+
+// Support for com.alibaba.tenant.TenantState
+
+int com_alibaba_tenant_TenantState::state_of(oop tenant_obj) {
+ assert(tenant_obj != NULL, "TenantContainer object cannot be NULL");
+
+ oop tenant_state = com_alibaba_tenant_TenantContainer::get_tenant_state(tenant_obj);
+ InstanceKlass* ik = InstanceKlass::cast(SystemDictionary::com_alibaba_tenant_TenantState_klass());
+
+ for (int i = TS_STARTING; i < TS_SIZE; ++i) {
+ assert(_static_state_offsets[i] == i * heapOopSize, "Must have been initialized");
+ address addr = ik->static_field_addr(_static_state_offsets[i]);
+ oop o = NULL;
+ if (UseCompressedOops) {
+ o = oopDesc::load_decode_heap_oop((narrowOop*)addr);
+ } else {
+ o = oopDesc::load_decode_heap_oop((oop*)addr);
+ }
+ assert(!oopDesc::is_null(o), "sanity");
+ if (tenant_state == o) {
+ return i;
+ }
+ }
+
+ ShouldNotReachHere();
+ return -1; // unreachable; keeps compilers that expect a return value happy
+}
+
+#endif // INCLUDE_ALL_GCS
+
void java_util_concurrent_locks_AbstractOwnableSynchronizer::initialize(TRAPS) {
if (_owner_offset != 0) return;
@@ -3415,6 +3493,10 @@ void JavaClasses::compute_offsets() {
// generated interpreter code wants to know about the offsets we just computed:
AbstractAssembler::update_delayed_values();
+
+ if (MultiTenant) {
+ com_alibaba_tenant_TenantContainer::compute_offsets();
+ }
}
#ifndef PRODUCT
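
For orientation, a hedged sketch of how VM code could use the new accessors once compute_offsets() has run (print_tenant is illustrative and not part of the patch):

    // Illustrative only: reading TenantContainer state from VM code.
    static void print_tenant(oop tenant_obj) {
      jlong id = com_alibaba_tenant_TenantContainer::get_tenant_id(tenant_obj);
      bool dead = com_alibaba_tenant_TenantContainer::is_dead(tenant_obj);
      tty->print_cr("tenant " JLONG_FORMAT " dead: %s", id, dead ? "true" : "false");
    }
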
diff --git a/src/share/vm/classfile/javaClasses.hpp b/src/share/vm/classfile/javaClasses.hpp
index 0516e37f2dc97d1b70c734f35549ddeb2ceae623..ba08769e699aa9eabfed812779f05957ddc80f42 100644
--- a/src/share/vm/classfile/javaClasses.hpp
+++ b/src/share/vm/classfile/javaClasses.hpp
@@ -1404,6 +1404,45 @@ class java_util_concurrent_locks_AbstractOwnableSynchronizer : AllStatic {
static oop get_owner_threadObj(oop obj);
};
+#if INCLUDE_ALL_GCS
+
+class G1TenantAllocationContext;
+
+class com_alibaba_tenant_TenantContainer : AllStatic {
+private:
+ static int _tenant_id_offset;
+ static int _allocation_context_offset;
+ static int _tenant_state_offset;
+public:
+ static jlong get_tenant_id(oop obj);
+ static G1TenantAllocationContext* get_tenant_allocation_context(oop obj);
+ static void set_tenant_allocation_context(oop obj, G1TenantAllocationContext* context);
+ static oop get_tenant_state(oop obj);
+ static bool is_dead(oop obj);
+ static void compute_offsets();
+};
+
+class com_alibaba_tenant_TenantState : AllStatic {
+ friend class JavaClasses;
+public:
+ // C++ level definition of tenant status
+ enum {
+ TS_STARTING = 0,
+ TS_RUNNING = 1,
+ TS_STOPPING = 2,
+ TS_DEAD = 3,
+ TS_SIZE,
+ };
+
+private:
+ // offsets
+ static int _static_state_offsets[TS_SIZE];
+public:
+ static int state_of(oop tenant_obj);
+};
+
+#endif // INCLUDE_ALL_GCS
+
// Use to declare fields that need to be injected into Java classes
// for the JVM to use. The name_index and signature_index are
// declared in vmSymbols. The may_be_java flag is used to declare
diff --git a/src/share/vm/classfile/vmSymbols.hpp b/src/share/vm/classfile/vmSymbols.hpp
index 619e6f0bc4dc6307af78ba56be25d59d2af5ad36..c69898f24fe8331bd11fe97f780008dc7cd90575 100644
--- a/src/share/vm/classfile/vmSymbols.hpp
+++ b/src/share/vm/classfile/vmSymbols.hpp
@@ -890,7 +890,8 @@
do_intrinsic(_updateByteBufferCRC32, java_util_zip_CRC32, updateByteBuffer_name, updateByteBuffer_signature, F_SN) \
do_name( updateByteBuffer_name, "updateByteBuffer") \
do_signature(updateByteBuffer_signature, "(IJII)I") \
- /* support for com.alibaba.tenant.TenantContainer */ \
+ \
+ /* support for com.alibaba.tenant.TenantContainer */ \
do_name( allocation_context_address, "allocationContext") \
do_name( tenant_id_address, "tenantId") \
\
diff --git a/src/share/vm/gc_implementation/g1/evacuationInfo.hpp b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp
index 97e0ab2f7357b46b59866d1b36f15af3ab90c62f..f592840b5825ba06ce183436b0cc3808f634fd99 100644
--- a/src/share/vm/gc_implementation/g1/evacuationInfo.hpp
+++ b/src/share/vm/gc_implementation/g1/evacuationInfo.hpp
@@ -49,6 +49,11 @@ public:
_allocation_regions = allocation_regions;
}
+ void increment_allocation_regions(uint allocation_regions) {
+ assert(TenantHeapIsolation, "pre-condition");
+ _allocation_regions += allocation_regions;
+ }
+
void set_collectionset_used_before(size_t used) {
_collectionset_used_before = used;
}
@@ -61,6 +66,13 @@ public:
_alloc_regions_used_before = used;
}
+ // In multi-tenant mode, set_alloc_regions_used_before() may be called multiple times
+ // (once per allocation context), so use the method below to accumulate the results
+ void increment_alloc_regions_used_before(size_t used) {
+ assert(TenantHeapIsolation, "pre-condition");
+ _alloc_regions_used_before += used;
+ }
+
void set_bytes_copied(size_t copied) {
_bytes_copied = copied;
}
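
The accumulate-versus-set distinction matters because reuse_retained_old_region() now runs once for the root context and once per tenant during the same pause; a sketch of the intended accounting (root_retained and tenant_retained are hypothetical retained-region pointers):

    // Each retained old region contributes its share to the same counter:
    evacuation_info.increment_alloc_regions_used_before(root_retained->used());
    evacuation_info.increment_alloc_regions_used_before(tenant_retained->used());
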
diff --git a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
index f92ae110231aa9902638e7025cf9f8bd6d36b586..74e7e5783af1bf800b509c0e755b067bf696bda4 100644
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.cpp
@@ -248,6 +248,11 @@ HeapRegion* MutatorAllocRegion::allocate_new_region(size_t word_size,
void MutatorAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
+ DEBUG_ONLY(if (TenantHeapIsolation) {
+ assert(alloc_region->allocation_context() == allocation_context(),
+ "Inconsistent allocation contexts");
+ });
+
_g1h->retire_mutator_alloc_region(alloc_region, allocated_bytes);
}
@@ -259,6 +264,11 @@ HeapRegion* SurvivorGCAllocRegion::allocate_new_region(size_t word_size,
void SurvivorGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
+ DEBUG_ONLY(if (TenantHeapIsolation) {
+ assert(alloc_region->allocation_context() == allocation_context(),
+ "HeapRegion's context should be same as SurvivorGCAllocRegion's");
+ });
+
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Young);
}
@@ -270,6 +280,11 @@ HeapRegion* OldGCAllocRegion::allocate_new_region(size_t word_size,
void OldGCAllocRegion::retire_region(HeapRegion* alloc_region,
size_t allocated_bytes) {
+ DEBUG_ONLY(if (TenantHeapIsolation) {
+ assert(alloc_region->allocation_context() == allocation_context(),
+ "HeapRegion's context should be same as OldGCAllocRegion's");
+ });
+
_g1h->retire_gc_alloc_region(alloc_region, allocated_bytes, InCSetState::Old);
}
diff --git a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp
index 2edc6545ce60256c59e9dffd7a6fe80cb02b2579..3f0fa8c8497f2bdbca182d92d0b64c9235ca594d 100644
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.hpp
@@ -144,8 +144,18 @@ public:
return (hr == _dummy_region) ? NULL : hr;
}
- void set_allocation_context(AllocationContext_t context) { _allocation_context = context; }
- AllocationContext_t allocation_context() { return _allocation_context; }
+ void set_allocation_context(AllocationContext_t context) {
+ _allocation_context = context;
+ }
+
+ const AllocationContext_t& allocation_context() const {
+ return _allocation_context;
+ }
+
+ const G1TenantAllocationContext* tenant_allocation_context() const {
+ assert(TenantHeapIsolation, "pre-condition");
+ return allocation_context().tenant_allocation_context();
+ }
uint count() { return _count; }
diff --git a/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp
index 4d5ff334b6f0554c51d521c4bf90864b7454b0e6..75bfa32d8609eb1f779e8f470ac58f47c2ae4aad 100644
--- a/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp
+++ b/src/share/vm/gc_implementation/g1/g1AllocRegion.inline.hpp
@@ -60,6 +60,15 @@ inline HeapWord* G1AllocRegion::attempt_allocation(size_t word_size,
HeapRegion* alloc_region = _alloc_region;
assert(alloc_region != NULL, ar_ext_msg(this, "not initialized properly"));
+ DEBUG_ONLY(if (TenantHeapIsolation
+ /* (_alloc_region == _dummy_region) means the current AllocRegion has
+ * not been fully initialized yet
+ */
+ && alloc_region != G1AllocRegion::_dummy_region) {
+ assert(allocation_context() == alloc_region->allocation_context(),
+ "Trying to allocate in the wrong heap region");
+ });
+
HeapWord* result = par_allocate(alloc_region, word_size, bot_updates);
if (result != NULL) {
trace("alloc", word_size, result);
@@ -82,6 +91,12 @@ inline HeapWord* G1AllocRegion::attempt_allocation_locked(size_t word_size,
retire(true /* fill_up */);
result = new_alloc_region_and_allocate(word_size, false /* force */);
if (result != NULL) {
+ DEBUG_ONLY(if (TenantHeapIsolation) {
+ // _alloc_region was updated, check its tenant alloc context
+ assert(allocation_context() == _alloc_region->allocation_context(),
+ "Allocate in wrong region");
+ });
+
trace("alloc locked (second attempt)", word_size, result);
return result;
}
@@ -97,6 +112,12 @@ inline HeapWord* G1AllocRegion::attempt_allocation_force(size_t word_size,
trace("forcing alloc");
HeapWord* result = new_alloc_region_and_allocate(word_size, true /* force */);
if (result != NULL) {
+ DEBUG_ONLY(if (TenantHeapIsolation) {
+ // _alloc_region was updated, check its tenant alloc context
+ assert(allocation_context() == _alloc_region->allocation_context(),
+ "Allocate in wrong region");
+ });
+
trace("alloc forced", word_size, result);
return result;
}
diff --git a/src/share/vm/gc_implementation/g1/g1AllocationContext.cpp b/src/share/vm/gc_implementation/g1/g1AllocationContext.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..648ff6534139ec29e8ae0f2eff19364bfbe8c2e5
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1AllocationContext.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "runtime/thread.hpp"
+
+// The root context (wrapping a NULL tenant pointer) is returned for the ROOT
+// tenant, or when memory isolation is not enabled
+AllocationContext_t AllocationContext::_root_context;
+
+AllocationContext_t AllocationContext::current() {
+ return Thread::current()->allocation_context();
+}
+
+AllocationContext_t AllocationContext::system() {
+ return _root_context;
+}
+
+AllocationContextMark::AllocationContextMark(AllocationContext_t ctxt)
+ : _saved_context(Thread::current()->allocation_context()) {
+ Thread* thrd = Thread::current();
+ thrd->set_allocation_context(ctxt);
+}
+
+AllocationContextMark::~AllocationContextMark() {
+ Thread* thrd = Thread::current();
+ thrd->set_allocation_context(_saved_context);
+}
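
AllocationContextMark is a standard RAII guard; a usage sketch, assuming target_tenant is a live G1TenantAllocationContext*:

    {
      AllocationContextMark acm(AllocationContext_t(target_tenant));
      // heap allocations on this thread now land in target_tenant's regions
    } // destructor restores the thread's previous allocation context
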
diff --git a/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp b/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp
index c1089268ca6c97c65f141e7eda8394bf7724c5f7..5434f1533d55dcadb3de004b89f4154635755467 100644
--- a/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp
+++ b/src/share/vm/gc_implementation/g1/g1AllocationContext.hpp
@@ -26,19 +26,105 @@
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
#include "memory/allocation.hpp"
+#include "utilities/hashtable.hpp"
-typedef unsigned char AllocationContext_t;
+class G1TenantAllocationContext;
+
+/*
+ * Typical scenario to use AllocationContext_t:
+ * _g1h->_allocator->mutator_alloc_buffer(alloc_context)->attempt_allocation(...)
+ *
+ * Here we simply make AllocationContext_t hold a pointer to the corresponding
+ * G1TenantAllocationContext when TenantHeapIsolation is enabled
+ *
+ */
+class AllocationContext_t VALUE_OBJ_CLASS_SPEC {
+private:
+ union {
+ volatile G1TenantAllocationContext* _tenant_alloc_context; // Pointer to corresponding tenant allocation context
+ unsigned char _alloc_context; // unused, original value type from OpenJDK
+ } _value;
+
+public:
+ AllocationContext_t(const uint64_t val) {
+ _value._tenant_alloc_context = (G1TenantAllocationContext*)val;
+ }
+ AllocationContext_t() { _value._tenant_alloc_context = NULL; }
+ AllocationContext_t(const AllocationContext_t& peer) : _value(peer._value) { }
+ AllocationContext_t(G1TenantAllocationContext* ctxt) {
+ Atomic::store_ptr(ctxt, &_value._tenant_alloc_context);
+ }
+
+ G1TenantAllocationContext* tenant_allocation_context() const {
+ assert(TenantHeapIsolation, "pre-condition");
+ return (G1TenantAllocationContext*)_value._tenant_alloc_context;
+ }
+
+ // This method is effectively unused for now, since the original implementation
+ // does not differentiate between the system and the current allocation context.
+ // If Oracle changes AllocationContext_t in the future, please update the method below as well
+ const unsigned char allocation_context() const { return 0; }
+
+ // Compare via the longest field of the union
+ // operator ==
+ inline bool operator ==(const AllocationContext_t& ctxt) const {
+ return _value._tenant_alloc_context == ctxt._value._tenant_alloc_context;
+ }
+ inline bool operator ==(const G1TenantAllocationContext* tac) const {
+ return _value._tenant_alloc_context == tac;
+ }
+ inline bool operator ==(unsigned char alloc_context) const {
+ return _value._alloc_context == alloc_context;
+ }
+
+ // operator !=
+ inline bool operator !=(const AllocationContext_t& ctxt) const {
+ return _value._tenant_alloc_context != ctxt._value._tenant_alloc_context;
+ }
+ inline bool operator !=(const G1TenantAllocationContext* tac) const {
+ return _value._tenant_alloc_context != tac;
+ }
+ inline bool operator !=(unsigned char alloc_context) const {
+ return _value._alloc_context != alloc_context;
+ }
+
+ // operator =
+ inline AllocationContext_t& operator =(const AllocationContext_t& ctxt) {
+ Atomic::store_ptr((void*)ctxt._value._tenant_alloc_context, &_value._tenant_alloc_context);
+ return *this;
+ }
+ inline AllocationContext_t& operator =(const G1TenantAllocationContext* tac) {
+ Atomic::store_ptr(const_cast<G1TenantAllocationContext*>(tac), &_value._tenant_alloc_context);
+ return *this;
+ }
+ inline AllocationContext_t& operator =(unsigned char alloc_context) {
+ _value._alloc_context = alloc_context;
+ return *this;
+ }
+
+ inline bool is_system() const { return NULL == _value._tenant_alloc_context; }
+
+ // to enable AllocationContext_t to be used as key type in HashMap
+ unsigned int hash_code() {
+ void *p = (void*)_value._tenant_alloc_context;
+ return HashMapUtil::hash(p);
+ }
+
+ inline G1TenantAllocationContext* operator -> () const {
+ return tenant_allocation_context();
+ }
+};
class AllocationContext : AllStatic {
+private:
+ static AllocationContext_t _root_context;
+
public:
// Currently used context
- static AllocationContext_t current() {
- return 0;
- }
+ static AllocationContext_t current();
+
// System wide default context
- static AllocationContext_t system() {
- return 0;
- }
+ static AllocationContext_t system();
};
class AllocationContextStats: public StackObj {
@@ -49,4 +135,13 @@ public:
inline bool available() { return false; }
};
+// To switch current to target AllocationContext_t during the lifespan of this object
+class AllocationContextMark : public StackObj {
+private:
+ AllocationContext_t _saved_context;
+public:
+ AllocationContextMark(AllocationContext_t ctxt);
+ ~AllocationContextMark();
+};
+
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCATIONCONTEXT_HPP
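
Because equality and hash_code() both derive from the embedded tenant pointer, two AllocationContext_t values behave as the same HashMap key exactly when they wrap the same G1TenantAllocationContext; a small sketch under that assumption (tac is a hypothetical pointer):

    AllocationContext_t a(tac);
    AllocationContext_t b(tac);
    assert(a == b, "same tenant pointer, so equal");
    assert(a.hash_code() == b.hash_code(), "equal keys must hash alike");
    assert(AllocationContext_t().is_system(), "default-constructed context is the system one");
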
diff --git a/src/share/vm/gc_implementation/g1/g1Allocator.cpp b/src/share/vm/gc_implementation/g1/g1Allocator.cpp
index 0d1ab84117e482ac5ce48579caf0c001fbd54832..e6ebc5f8ceb1af4aed1cfafe022b3f1016c95f3e 100644
--- a/src/share/vm/gc_implementation/g1/g1Allocator.cpp
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.cpp
@@ -30,21 +30,85 @@
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
void G1DefaultAllocator::init_mutator_alloc_region() {
+ if (TenantHeapIsolation) {
+ G1TenantAllocationContexts::init_mutator_alloc_regions();
+ }
+
assert(_mutator_alloc_region.get() == NULL, "pre-condition");
_mutator_alloc_region.init();
}
void G1DefaultAllocator::release_mutator_alloc_region() {
+ if (TenantHeapIsolation) {
+ G1TenantAllocationContexts::release_mutator_alloc_regions();
+ }
+
_mutator_alloc_region.release();
assert(_mutator_alloc_region.get() == NULL, "post-condition");
}
+MutatorAllocRegion* G1DefaultAllocator::mutator_alloc_region(AllocationContext_t context) {
+ if (TenantHeapIsolation && !context.is_system()) {
+ G1TenantAllocationContext* tac = context.tenant_allocation_context();
+ assert(NULL != tac, "Tenant alloc context cannot be NULL");
+ return tac->mutator_alloc_region();
+ }
+ return &_mutator_alloc_region;
+}
+
+SurvivorGCAllocRegion* G1DefaultAllocator::survivor_gc_alloc_region(AllocationContext_t context) {
+ if (TenantHeapIsolation && !context.is_system()) {
+ G1TenantAllocationContext* tac = context.tenant_allocation_context();
+ assert(NULL != tac, "Tenant alloc context cannot be NULL");
+ return tac->survivor_gc_alloc_region();
+ }
+ return &_survivor_gc_alloc_region;
+}
+
+OldGCAllocRegion* G1DefaultAllocator::old_gc_alloc_region(AllocationContext_t context) {
+ if (TenantHeapIsolation && !context.is_system()) {
+ G1TenantAllocationContext* tac = context.tenant_allocation_context();
+ assert(NULL != tac, "Tenant alloc context cannot be NULL");
+ return tac->old_gc_alloc_region();
+ }
+ return &_old_gc_alloc_region;
+}
+
+size_t G1DefaultAllocator::used() {
+ assert(Heap_lock->owner() != NULL,
+ "Should be owned on this thread's behalf.");
+ size_t result = _summary_bytes_used;
+
+ if (TenantHeapIsolation) {
+ // the root tenant's mutator alloc region
+ HeapRegion* hr = mutator_alloc_region(AllocationContext::system())->get();
+ if (NULL != hr) { result += hr->used(); }
+
+ result += G1TenantAllocationContexts::total_used();
+ } else {
+ // TenantHeapIsolation disabled mode
+ // Read only once in case it is set to NULL concurrently
+ HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
+ if (hr != NULL) {
+ result += hr->used();
+ }
+ }
+ return result;
+}
+
void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
OldGCAllocRegion* old,
HeapRegion** retained_old) {
HeapRegion* retained_region = *retained_old;
*retained_old = NULL;
+ AllocationContext_t context = old->allocation_context();
+
+ DEBUG_ONLY(if (TenantHeapIsolation && NULL != retained_region) {
+ assert(context == retained_region->allocation_context(),
+ "Inconsistent tenant alloc contexts");
+ });
+
// We will discard the current GC alloc region if:
// a) it's in the collection set (it can happen!),
// b) it's already full (no point in using it),
@@ -69,7 +133,13 @@ void G1Allocator::reuse_retained_old_region(EvacuationInfo& evacuation_info,
retained_region->note_start_of_copying(during_im);
old->set(retained_region);
_g1h->_hr_printer.reuse(retained_region);
- evacuation_info.set_alloc_regions_used_before(retained_region->used());
+
+ // Do accumulation in tenant mode, otherwise just set it
+ if (TenantHeapIsolation) {
+ evacuation_info.increment_alloc_regions_used_before(retained_region->used());
+ } else {
+ evacuation_info.set_alloc_regions_used_before(retained_region->used());
+ }
}
}
@@ -81,10 +151,22 @@ void G1DefaultAllocator::init_gc_alloc_regions(EvacuationInfo& evacuation_info)
reuse_retained_old_region(evacuation_info,
&_old_gc_alloc_region,
&_retained_old_gc_alloc_region);
+
+ if (TenantHeapIsolation) {
+ // for non-root tenants
+ G1TenantAllocationContexts::init_gc_alloc_regions(this, evacuation_info);
+ }
}
void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info) {
AllocationContext_t context = AllocationContext::current();
+ if (TenantHeapIsolation) {
+ // in non-tenant mode, system() == current(), so AllocationContext::current() just works;
+ // in tenant mode we want to release the gc alloc regions of all tenants,
+ // so explicitly switch the context to system() below.
+ context = AllocationContext::system();
+ }
+
evacuation_info.set_allocation_regions(survivor_gc_alloc_region(context)->count() +
old_gc_alloc_region(context)->count());
survivor_gc_alloc_region(context)->release();
@@ -98,6 +180,11 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
_retained_old_gc_alloc_region->record_retained_region();
}
+ // Release GC alloc region for non-root tenants
+ if (TenantHeapIsolation) {
+ G1TenantAllocationContexts::release_gc_alloc_regions(evacuation_info);
+ }
+
if (ResizePLAB) {
_g1h->_survivor_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
_g1h->_old_plab_stats.adjust_desired_plab_sz(no_of_gc_workers);
@@ -105,9 +192,32 @@ void G1DefaultAllocator::release_gc_alloc_regions(uint no_of_gc_workers, Evacuat
}
void G1DefaultAllocator::abandon_gc_alloc_regions() {
- assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
- assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+ DEBUG_ONLY(if (TenantHeapIsolation) {
+ // in non-tenant mode, system() == current(), so AllocationContext::current() just works;
+ // in tenant mode we are checking the gc alloc regions of all tenants,
+ // so explicitly use the system() context for the root tenant's regions below.
+ assert(survivor_gc_alloc_region(AllocationContext::system())->get() == NULL, "pre-condition");
+ assert(old_gc_alloc_region(AllocationContext::system())->get() == NULL, "pre-condition");
+ } else {
+ // original logic, untouched
+ assert(survivor_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+ assert(old_gc_alloc_region(AllocationContext::current())->get() == NULL, "pre-condition");
+ });
+
_retained_old_gc_alloc_region = NULL;
+
+ if (TenantHeapIsolation) {
+ G1TenantAllocationContexts::abandon_gc_alloc_regions();
+ }
+}
+
+bool G1DefaultAllocator::is_retained_old_region(HeapRegion* hr) {
+ if (TenantHeapIsolation && NULL != hr && !hr->allocation_context().is_system()) {
+ G1TenantAllocationContext* tac = hr->allocation_context().tenant_allocation_context();
+ assert(NULL != tac, "pre-condition");
+ return tac->retained_old_gc_alloc_region() == hr;
+ }
+ return _retained_old_gc_alloc_region == hr;
}
G1ParGCAllocBuffer::G1ParGCAllocBuffer(size_t gclab_word_size) :
@@ -138,15 +248,67 @@ HeapWord* G1ParGCAllocator::allocate_direct_or_new_plab(InCSetState dest,
}
}
+G1TenantParGCAllocBuffer::G1TenantParGCAllocBuffer(G1CollectedHeap* g1h,
+ AllocationContext_t ac)
+ : _allocation_context(ac)
+ , _g1h(g1h)
+ , _surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young))
+ , _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
+ assert(TenantHeapIsolation, "pre-condition");
+ for (uint state = 0; state < InCSetState::Num; state++) {
+ _alloc_buffers[state] = NULL;
+ }
+ _alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
+ _alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
+}
+
+G1ParGCAllocBuffer* G1TenantParGCAllocBuffer::alloc_buffer(InCSetState dest) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(dest.is_valid(), "just checking");
+ return _alloc_buffers[dest.value()];
+}
+
G1DefaultParGCAllocator::G1DefaultParGCAllocator(G1CollectedHeap* g1h) :
G1ParGCAllocator(g1h),
_surviving_alloc_buffer(g1h->desired_plab_sz(InCSetState::Young)),
- _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)) {
+ _tenured_alloc_buffer(g1h->desired_plab_sz(InCSetState::Old)),
+ _tenant_par_alloc_buffers(NULL) {
for (uint state = 0; state < InCSetState::Num; state++) {
_alloc_buffers[state] = NULL;
}
_alloc_buffers[InCSetState::Young] = &_surviving_alloc_buffer;
_alloc_buffers[InCSetState::Old] = &_tenured_alloc_buffer;
+
+ if (TenantHeapIsolation) {
+ _tenant_par_alloc_buffers = new TenantBufferMap(G1TenantAllocationContexts::active_context_count());
+ }
+}
+
+G1TenantParGCAllocBuffer* G1DefaultParGCAllocator::tenant_par_alloc_buffer_of(AllocationContext_t ac) {
+ assert(TenantHeapIsolation, "pre-condition");
+
+ // look up the per-tenant buffer; returns NULL if none has been created yet
+ assert(NULL != _tenant_par_alloc_buffers, "just checking");
+ if (_tenant_par_alloc_buffers->contains(ac)) {
+ assert(NULL != _tenant_par_alloc_buffers->get(ac), "pre-condition");
+ return _tenant_par_alloc_buffers->get(ac)->value();
+ }
+
+ return NULL;
+}
+
+G1DefaultParGCAllocator::~G1DefaultParGCAllocator() {
+ if (TenantHeapIsolation) {
+ assert(NULL != _tenant_par_alloc_buffers, "just checking");
+ for (TenantBufferMap::Iterator itr = _tenant_par_alloc_buffers->begin();
+ itr != _tenant_par_alloc_buffers->end(); ++itr) {
+ assert(!itr->key().is_system(), "pre-condition");
+ G1TenantParGCAllocBuffer* tbuf = itr->value();
+ delete tbuf;
+ }
+ _tenant_par_alloc_buffers->clear();
+ delete _tenant_par_alloc_buffers;
+ }
}
void G1DefaultParGCAllocator::retire_alloc_buffers() {
@@ -158,5 +320,48 @@ void G1DefaultParGCAllocator::retire_alloc_buffers() {
true /* end_of_gc */,
false /* retain */);
}
+
+ if (TenantHeapIsolation) {
+ assert(NULL != _tenant_par_alloc_buffers, "just checking");
+ // retire all non-root buffers
+ for (TenantBufferMap::Iterator itr = _tenant_par_alloc_buffers->begin();
+ itr != _tenant_par_alloc_buffers->end(); ++itr) {
+ assert(!itr->key().is_system(), "pre-condition");
+ G1TenantParGCAllocBuffer* tbuf = itr->value();
+ assert(NULL != tbuf, "pre-condition");
+ G1ParGCAllocBuffer* buffer = tbuf->alloc_buffer(state);
+ if (buffer != NULL) {
+ add_to_alloc_buffer_waste(buffer->words_remaining());
+ buffer->flush_stats_and_retire(_g1h->alloc_buffer_stats(state), true, false);
+ }
+ }
+ } else {
+ assert(NULL == _tenant_par_alloc_buffers, "just checking");
+ }
+ }
+}
+
+G1ParGCAllocBuffer* G1DefaultParGCAllocator::alloc_buffer(InCSetState dest, AllocationContext_t context) {
+ assert(dest.is_valid(),
+ err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
+
+ if (TenantHeapIsolation && !context.is_system()) {
+ assert(NULL != _tenant_par_alloc_buffers, "just checking");
+ G1TenantParGCAllocBuffer* tbuf = tenant_par_alloc_buffer_of(context);
+ if (NULL == tbuf) {
+ tbuf = new G1TenantParGCAllocBuffer(_g1h, context);
+ _tenant_par_alloc_buffers->put(context, tbuf);
+ }
+
+ assert(NULL != tbuf
+ && NULL != _tenant_par_alloc_buffers->get(context)
+ && tbuf == _tenant_par_alloc_buffers->get(context)->value(), "post-condition");
+ G1ParGCAllocBuffer* buf = tbuf->alloc_buffer(dest);
+ assert(NULL != buf, "post-condition");
+ return buf;
}
+
+ assert(_alloc_buffers[dest.value()] != NULL,
+ err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
+ return _alloc_buffers[dest.value()];
}
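
Putting the pieces together: a GC worker copying an object selects its PLAB through the destination state plus the source region's allocation context, and the per-tenant buffer pair is created lazily on first use. An illustrative call (allocator, hr and word_sz are stand-ins):

    G1ParGCAllocBuffer* plab =
        allocator->alloc_buffer(InCSetState::Young, hr->allocation_context());
    HeapWord* dst = plab->allocate(word_sz); // NULL means the PLAB is full
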
diff --git a/src/share/vm/gc_implementation/g1/g1Allocator.hpp b/src/share/vm/gc_implementation/g1/g1Allocator.hpp
index 04628b7dec00b0031e806a541caed60cd833be2c..06c2e1f78c40da86a5973f688bc77f3ed5d99c41 100644
--- a/src/share/vm/gc_implementation/g1/g1Allocator.hpp
+++ b/src/share/vm/gc_implementation/g1/g1Allocator.hpp
@@ -29,6 +29,8 @@
#include "gc_implementation/g1/g1AllocRegion.hpp"
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/shared/parGCAllocBuffer.hpp"
+#include "utilities/hashtable.hpp"
+#include "utilities/hashtable.inline.hpp"
// Base class for G1 allocators.
class G1Allocator : public CHeapObj<mtGC> {
@@ -114,34 +116,15 @@ public:
virtual void release_gc_alloc_regions(uint no_of_gc_workers, EvacuationInfo& evacuation_info);
virtual void abandon_gc_alloc_regions();
- virtual bool is_retained_old_region(HeapRegion* hr) {
- return _retained_old_gc_alloc_region == hr;
- }
+ virtual bool is_retained_old_region(HeapRegion* hr);
- virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context) {
- return &_mutator_alloc_region;
- }
+ virtual MutatorAllocRegion* mutator_alloc_region(AllocationContext_t context);
- virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context) {
- return &_survivor_gc_alloc_region;
- }
+ virtual SurvivorGCAllocRegion* survivor_gc_alloc_region(AllocationContext_t context);
- virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context) {
- return &_old_gc_alloc_region;
- }
+ virtual OldGCAllocRegion* old_gc_alloc_region(AllocationContext_t context);
- virtual size_t used() {
- assert(Heap_lock->owner() != NULL,
- "Should be owned on this thread's behalf.");
- size_t result = _summary_bytes_used;
-
- // Read only once in case it is set to NULL concurrently
- HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
- if (hr != NULL) {
- result += hr->used();
- }
- return result;
- }
+ virtual size_t used();
};
class G1ParGCAllocBuffer: public ParGCAllocBuffer {
@@ -209,6 +192,8 @@ public:
_alloc_buffer_waste(0), _undo_waste(0) {
}
+ virtual ~G1ParGCAllocator() { }
+
static G1ParGCAllocator* create_allocator(G1CollectedHeap* g1h);
size_t alloc_buffer_waste() { return _alloc_buffer_waste; }
@@ -255,21 +240,51 @@ public:
}
};
+class G1DefaultParGCAllocator;
+
+// To encapsulate per-tenant ParGCAllocBuffer for G1DefaultParGCAllocator to use
+// during GC pause.
+// NOTE: thread local object
+class G1TenantParGCAllocBuffer : public CHeapObj<mtTenant> {
+ friend class G1DefaultParGCAllocator;
+private:
+ G1ParGCAllocBuffer _surviving_alloc_buffer;
+ G1ParGCAllocBuffer _tenured_alloc_buffer;
+ G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
+
+ AllocationContext_t _allocation_context; // NOTE: used during GC, be careful with dereferencing
+ G1CollectedHeap* _g1h;
+
+private:
+ G1TenantParGCAllocBuffer(G1CollectedHeap* g1h, AllocationContext_t ac);
+
+ G1ParGCAllocBuffer* alloc_buffer(InCSetState dest);
+
+ AllocationContext_t allocation_context() { return _allocation_context; }
+ void set_allocation_context(AllocationContext_t ac) { _allocation_context = ac; }
+};
+
class G1DefaultParGCAllocator : public G1ParGCAllocator {
+private:
+ // only for ROOT tenant if TenantHeapIsolation enabled
G1ParGCAllocBuffer _surviving_alloc_buffer;
G1ParGCAllocBuffer _tenured_alloc_buffer;
G1ParGCAllocBuffer* _alloc_buffers[InCSetState::Num];
+ // Per-tenant par gc allocation buffers
+ typedef HashMap<AllocationContext_t, G1TenantParGCAllocBuffer*> TenantBufferMap;
+ TenantBufferMap* _tenant_par_alloc_buffers;
+
+protected:
+ // returns tenant alloc buffer of target allocation context, NULL if not exist
+ G1TenantParGCAllocBuffer* tenant_par_alloc_buffer_of(AllocationContext_t ac);
+
public:
G1DefaultParGCAllocator(G1CollectedHeap* g1h);
- virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context) {
- assert(dest.is_valid(),
- err_msg("Allocation buffer index out-of-bounds: " CSETSTATE_FORMAT, dest.value()));
- assert(_alloc_buffers[dest.value()] != NULL,
- err_msg("Allocation buffer is NULL: " CSETSTATE_FORMAT, dest.value()));
- return _alloc_buffers[dest.value()];
- }
+ virtual ~G1DefaultParGCAllocator();
+
+ virtual G1ParGCAllocBuffer* alloc_buffer(InCSetState dest, AllocationContext_t context);
virtual void retire_alloc_buffers() ;
};
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index 9156a0872d1dc455286e2b6e46e20a9bdb88abb8..00998395923da6da970ca544b4c87042a2055319 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -68,6 +68,7 @@
#include "runtime/orderAccess.inline.hpp"
#include "runtime/vmThread.hpp"
#include "gc_implementation/g1/elasticHeap.hpp"
+#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
@@ -849,6 +850,14 @@ G1CollectedHeap::mem_allocate(size_t word_size,
} else {
result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
}
+
+#ifndef PRODUCT
+ if (TenantHeapIsolation && TraceNonRootTenantAllocation && !AllocationContext::current().is_system()) {
+ tty->print_cr("Non-root allocation: " SIZE_FORMAT " bytes @0x" PTR_FORMAT " in tenant 0x" PTR_FORMAT,
+ word_size * HeapWordSize, result, AllocationContext::current().tenant_allocation_context());
+ }
+#endif
+
if (result != NULL) {
return result;
}
@@ -1972,6 +1981,11 @@ jint G1CollectedHeap::initialize() {
// it will be used then.
_hr_printer.set_active(G1PrintHeapRegions);
+ // have to do this early before mutator_alloc_region initialization
+ if (TenantHeapIsolation) {
+ G1TenantAllocationContexts::initialize();
+ }
+
// While there are no constraints in the GC code that HeapWordSize
// be any particular value, there are multiple other areas in the
// system which believe this to be true (e.g. oop->object_size in some
@@ -7055,3 +7069,41 @@ void G1CollectedHeap::rebuild_strong_code_roots() {
RebuildStrongCodeRootClosure blob_cl(this);
CodeCache::blobs_do(&blob_cl);
}
+
+void G1CollectedHeap::create_tenant_allocation_context(oop tenant_obj) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(tenant_obj != NULL, "Tenant container object is null");
+
+ G1TenantAllocationContext* context = new (mtTenant) G1TenantAllocationContext(this);
+ assert(NULL != context, "Failed to create tenant context");
+
+ com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(tenant_obj, context);
+ context->set_tenant_container(tenant_obj);
+}
+
+void G1CollectedHeap::destroy_tenant_allocation_context(jlong context_val) {
+ assert(TenantHeapIsolation, "pre-condition");
+ G1TenantAllocationContext* context = (G1TenantAllocationContext*)context_val;
+ assert(NULL != context, "Delete an uninitialized tenant container");
+ oop tenant_obj = context->tenant_container();
+ assert(tenant_obj != NULL, "TenantContainer object cannot be NULL");
+ delete context;
+ com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(tenant_obj, NULL);
+}
+
+oop G1CollectedHeap::tenant_container_of(oop obj) {
+ assert(TenantHeapIsolation, "pre-condition");
+
+ if (obj != NULL) {
+ // Map: oop -> object address -> heap region -> tenant allocation context -> tenant container oop
+ HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
+ if (NULL != hr) {
+ const G1TenantAllocationContext* context = hr->tenant_allocation_context();
+ if (NULL != context) {
+ return context->tenant_container();
+ }
+ }
+ }
+ return NULL;
+}
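
tenant_container_of() is presumably what backs the JVM_TenantContainerOf symbol exported by the map files; a sketch of the glue under that assumption (the real entry point lives elsewhere in the patch):

    JVM_ENTRY(jobject, JVM_TenantContainerOf(JNIEnv* env, jclass cls, jobject obj))
      assert(TenantHeapIsolation, "pre-condition");
      oop container = G1CollectedHeap::heap()->tenant_container_of(JNIHandles::resolve(obj));
      return JNIHandles::make_local(env, container);
    JVM_END
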
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
index 5d314e82bfad7e9b4d517c50eb86ec405bf94e05..1bca8daff4ec37ff0bc00db4530a4b001ac010b2 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp
@@ -35,6 +35,7 @@
#include "gc_implementation/g1/g1InCSetState.hpp"
#include "gc_implementation/g1/g1MonitoringSupport.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
+#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegionManager.hpp"
#include "gc_implementation/g1/heapRegionSet.hpp"
@@ -1652,6 +1653,11 @@ public:
void print_cset_rsets() PRODUCT_RETURN;
void print_all_rsets() PRODUCT_RETURN;
+ // Tenant allocation context manipulation
+ void create_tenant_allocation_context(oop tenant_obj);
+ void destroy_tenant_allocation_context(jlong context);
+ oop tenant_container_of(oop obj);
+
public:
size_t pending_card_num();
size_t cards_scanned();
diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
index fcf09270aecb6c35291f3920743a337d71685523..2683386ee403c8121fd1557ccb200d29f8cf5ca1 100644
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.cpp
@@ -77,6 +77,9 @@ G1GCPhaseTimes::G1GCPhaseTimes(uint max_gc_threads) :
_gc_par_phases[RedirtyCards] = new WorkerDataArray<double>(max_gc_threads, "Parallel Redirty", true, G1Log::LevelFinest, 3);
_redirtied_cards = new WorkerDataArray<size_t>(max_gc_threads, "Redirtied Cards", true, G1Log::LevelFinest, 3);
_gc_par_phases[RedirtyCards]->link_thread_work_items(_redirtied_cards);
+
+ // Cannot guard the line below with TenantHeapIsolation, since there is no conditional compilation for tenant mode
+ _gc_par_phases[TenantAllocationContextRoots] = new WorkerDataArray<double>(max_gc_threads, "G1TenantAllocationContext Roots (ms)", true, G1Log::LevelFinest, 3);
}
void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress) {
@@ -90,6 +93,8 @@ void G1GCPhaseTimes::note_gc_start(uint active_gc_threads, bool mark_in_progress
_gc_par_phases[StringDedupQueueFixup]->set_enabled(G1StringDedup::is_enabled());
_gc_par_phases[StringDedupTableFixup]->set_enabled(G1StringDedup::is_enabled());
+
+ _gc_par_phases[TenantAllocationContextRoots]->set_enabled(TenantHeapIsolation);
}
void G1GCPhaseTimes::note_gc_end() {
diff --git a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
index e7d6e40b974b170034d336dcf481d24674880bbd..a8b2c71a26a5b4fb34cbe0393840346151fb99a7 100644
--- a/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
+++ b/src/share/vm/gc_implementation/g1/g1GCPhaseTimes.hpp
@@ -43,6 +43,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
GCWorkerStart,
ExtRootScan,
ThreadRoots,
+ TenantAllocationContextRoots,
StringTableRoots,
UniverseRoots,
JNIRoots,
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
index be4cebb5488466264f7e2b43c4e22873f23010c2..8f46b0cb927ee4114adb3c36beb0efd16457a76d 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp
@@ -332,14 +332,67 @@ void G1PrepareCompactClosure::free_humongous_region(HeapRegion* hr) {
dummy_free_list.remove_all();
}
+bool G1PrepareCompactClosure::is_cp_initialized_for(AllocationContext_t ac) {
+ assert_at_safepoint(true /* in vm thread */);
+ assert(TenantHeapIsolation, "pre-condition");
+
+ if (ac.is_system()) {
+ return _root_cp.space != NULL;
+ }
+ G1TenantAllocationContext* tac = ac.tenant_allocation_context();
+ assert(NULL != tac, "Tenant alloc context cannot be NULL");
+ return tac->cached_compact_point().space != NULL;
+}
+
void G1PrepareCompactClosure::prepare_for_compaction(HeapRegion* hr, HeapWord* end) {
// If this is the first live region that we came across which we can compact,
// initialize the CompactPoint.
- if (!is_cp_initialized()) {
- _cp.space = hr;
- _cp.threshold = hr->initialize_threshold();
+ // Otherwise, if TenantHeapIsolation is enabled, load the saved CompactPoint
+ // from the corresponding tenant context
+ if (TenantHeapIsolation) {
+ assert_at_safepoint(true /* in vm thread */);
+ AllocationContext_t ac = hr->allocation_context();
+ if (!is_cp_initialized_for(ac)) {
+ // first live region of this tenant
+ _cp.threshold = hr->initialize_threshold();
+ _cp.space = hr;
+ } else {
+ // not the first live region for this tenant; switch compact points if needed
+ HeapRegion* cur_space = (HeapRegion*)_cp.space;
+ if (ac != cur_space->allocation_context()) {
+ // pick the corresponding saved compact point based on the tenant alloc context
+ if (ac.is_system()) {
+ _cp = _root_cp;
+ } else {
+ G1TenantAllocationContext* tac = ac.tenant_allocation_context();
+ assert(NULL != tac, "just checking");
+ _cp = tac->cached_compact_point();
+ }
+ assert(NULL != _cp.space, "post-condition");
+ }
+ }
+ } else /* if (!TenantHeapIsolation) */ {
+ if (!is_cp_initialized()) {
+ // will be called only once during the whole iteration
+ _cp.space = hr;
+ _cp.threshold = hr->initialize_threshold();
+ }
}
+
prepare_for_compaction_work(&_cp, hr, end);
+
+ // save current CompactPoint to corresponding tenant context
+ if (TenantHeapIsolation) {
+ assert(NULL != _cp.space, "pre-condition");
+ HeapRegion* cur_space = (HeapRegion*)_cp.space;
+ if (cur_space->allocation_context().is_system()) {
+ _root_cp = _cp;
+ } else {
+ G1TenantAllocationContext* tac = cur_space->allocation_context().tenant_allocation_context();
+ assert(NULL != tac, "just checking");
+ tac->set_cached_compact_point(_cp);
+ }
+ }
}
void G1PrepareCompactClosure::prepare_for_compaction_work(CompactPoint* cp,
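
The compact-point bookkeeping above follows a load/compact/store discipline per region; a condensed walkthrough, with ctx(hr) and cached_cp(ctx) as shorthand for the code above:

    // for each live region hr, in heap order:
    //   if cached_cp(ctx(hr)) is uninitialized:  _cp = CompactPoint starting at hr
    //   else if ctx(hr) != ctx(_cp.space):       _cp = cached_cp(ctx(hr))
    //   prepare_for_compaction_work(&_cp, hr, end);
    //   cached_cp(ctx(_cp.space)) = _cp;         // save for this tenant's next region
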
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
index cdde980d30646e7a660c23e03f7b5bbdd3d3a6b4..b21875ce9ce138320ccadca4ce51fc552fa7e9b3 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.hpp
@@ -79,6 +79,7 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
ModRefBarrierSet* _mrbs;
CompactPoint _cp;
+ CompactPoint _root_cp;
HeapRegionSetCount _humongous_regions_removed;
virtual void prepare_for_compaction(HeapRegion* hr, HeapWord* end);
@@ -86,6 +87,9 @@ class G1PrepareCompactClosure : public HeapRegionClosure {
void free_humongous_region(HeapRegion* hr);
bool is_cp_initialized() const { return _cp.space != NULL; }
+ // check cp based on alloc context, this is to support TenantHeapIsolation
+ bool is_cp_initialized_for(AllocationContext_t ac);
+
public:
G1PrepareCompactClosure() :
_g1h(G1CollectedHeap::heap()),
diff --git a/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp b/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp
index 006e787bee31e05d04a27bfca50f83ecb954b1ef..29ba5e2fecb9c9426ed05e10864b2e7c3d1db53a 100644
--- a/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp
+++ b/src/share/vm/gc_implementation/g1/g1MarkSweep_ext.cpp
@@ -26,6 +26,11 @@
#include "gc_implementation/g1/g1MarkSweep.hpp"
void G1MarkSweep::prepare_compaction() {
+ if (TenantHeapIsolation) {
+ // clear compaction dest info for all tenants
+ G1TenantAllocationContexts::prepare_for_compaction();
+ }
+
G1PrepareCompactClosure blk;
G1MarkSweep::prepare_compaction_work(&blk);
}
diff --git a/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp b/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
index 5b27a017a4114b2785b651ff7500088926695a61..25ce52cc15a6b19fa72214ff25278878e5e93b66 100644
--- a/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.cpp
@@ -327,6 +327,14 @@ void G1RootProcessor::process_vm_roots(OopClosure* strong_roots,
SystemDictionary::roots_oops_do(strong_roots, weak_roots);
}
}
+
+ if (TenantHeapIsolation) {
+ // process references from G1TenantAllocationContext
+ G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::TenantAllocationContextRoots, worker_i);
+ if (!_process_strong_tasks.is_task_claimed(G1RP_PS_TenantAllocationContext_oops_do)) {
+ G1TenantAllocationContexts::oops_do(strong_roots);
+ }
+ }
}
void G1RootProcessor::process_string_table_roots(OopClosure* weak_roots, G1GCPhaseTimes* phase_times,
diff --git a/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp b/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
index ad4e75ba3c106791b6b6badd43e7f2816d7cb782..9bdbbab718d523b6b82155ae49b66ba90d64d751 100644
--- a/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
+++ b/src/share/vm/gc_implementation/g1/g1RootProcessor.hpp
@@ -65,6 +65,7 @@ class G1RootProcessor : public StackObj {
G1RP_PS_CodeCache_oops_do,
G1RP_PS_filter_satb_buffers,
G1RP_PS_refProcessor_oops_do,
+ G1RP_PS_TenantAllocationContext_oops_do,
// Leave this one last.
G1RP_PS_NumElements
};
diff --git a/src/share/vm/gc_implementation/g1/g1TenantAllocationContext.cpp b/src/share/vm/gc_implementation/g1/g1TenantAllocationContext.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..6363362ce28f12544adff16187c2e88246a0ff96
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1TenantAllocationContext.cpp
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "runtime/thread.hpp"
+#include "runtime/orderAccess.hpp"
+#include "runtime/safepoint.hpp"
+#include "runtime/mutexLocker.hpp"
+#include "memory/iterator.hpp"
+#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.hpp"
+
+//----------------------- G1TenantAllocationContext ---------------------------
+
+G1TenantAllocationContext::G1TenantAllocationContext(G1CollectedHeap* g1h)
+ : _g1h(g1h),
+ _occupied_heap_region_count(0),
+ _heap_size_limit(TENANT_HEAP_NO_LIMIT),
+ _heap_region_limit(0),
+ _tenant_container(NULL),
+ _retained_old_gc_alloc_region(NULL) {
+
+ assert(TenantHeapIsolation, "pre-condition");
+ // in current design we do not create G1TenantAllocationContext at safepoint
+ assert_not_at_safepoint();
+
+#ifndef PRODUCT
+ if (TraceG1TenantAllocationContext) {
+ tty->print_cr("Create G1TenantAllocationContext: " PTR_FORMAT, p2i(this));
+ }
+#endif
+
+ // initialize the mutator alloc region eagerly, because it may be used
+ // to allocate memory immediately after the tenant alloc context is created
+ _mutator_alloc_region.init();
+
+ AllocationContext_t ac(this);
+ _mutator_alloc_region.set_allocation_context(ac);
+ _survivor_gc_alloc_region.set_allocation_context(ac);
+ _old_gc_alloc_region.set_allocation_context(ac);
+
+ G1TenantAllocationContexts::add(this);
+}
+
+class ClearAllocationContextClosure : public HeapRegionClosure {
+private:
+ AllocationContext_t _target;
+public:
+ ClearAllocationContextClosure(AllocationContext_t ctxt) : _target(ctxt) {
+ assert(!_target.is_system(), "Cannot clear root tenant context");
+ }
+
+ virtual bool doHeapRegion(HeapRegion* region) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(NULL != region, "Region cannot be NULL");
+ if (region->allocation_context() == _target) {
+ region->set_allocation_context(AllocationContext::system());
+ }
+ return false /* keep iterating over all regions */;
+ }
+};
+
+// clean up work that has to be done at safepoint
+class DestroyG1TenantAllocationContextOperation : public VM_Operation {
+private:
+ G1TenantAllocationContext* _context_to_destroy;
+public:
+ DestroyG1TenantAllocationContextOperation(G1TenantAllocationContext* context)
+ : _context_to_destroy(context)
+ {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(_context_to_destroy != G1TenantAllocationContexts::system_context(),
+ "Should never destroy system context");
+ assert(!oopDesc::is_null(_context_to_destroy->tenant_container()), "sanity");
+ }
+ virtual void doit();
+ virtual VMOp_Type type() const { return VMOp_DestroyG1TenantAllocationContext; }
+};
+
+void DestroyG1TenantAllocationContextOperation::doit() {
+ assert_at_safepoint(true /* vm thread */);
+
+ if (UsePerTenantTLAB) {
+ assert(UseTLAB, "Sanity");
+ for (JavaThread *thread = Threads::first(); thread != NULL; thread = thread->next()) {
+ thread->clean_tlab_for(_context_to_destroy);
+ }
+ }
+
+ // return any active mutator alloc region
+ MutatorAllocRegion* mar = _context_to_destroy->mutator_alloc_region();
+ HeapRegion* hr = mar->release();
+ assert(mar->get() == NULL, "post-condition");
+ if (hr != NULL) { // this mutator region has been used
+ // 1. return the mutator's heap region to the root tenant;
+ // 2. any objects still live in that region after GC will be
+ // owned by the root tenant
+ hr->set_allocation_context(AllocationContext::system());
+ }
+
+ // traverse all HeapRegions and update alloc contexts
+ AllocationContext_t ctxt(_context_to_destroy);
+ ClearAllocationContextClosure cl(ctxt);
+ G1CollectedHeap::heap()->heap_region_iterate(&cl);
+
+ G1TenantAllocationContexts::remove(_context_to_destroy);
+ com_alibaba_tenant_TenantContainer::set_tenant_allocation_context(_context_to_destroy->tenant_container(),
+ G1TenantAllocationContexts::system_context());
+
+#ifndef PRODUCT
+ if (TraceG1TenantAllocationContext) {
+ tty->print_cr("Destroy G1TenantAllocationContext:" PTR_FORMAT, p2i(_context_to_destroy));
+ }
+#endif
+}
+
+G1TenantAllocationContext::~G1TenantAllocationContext() {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert_not_at_safepoint();
+
+ DestroyG1TenantAllocationContextOperation vm_op(this);
+ VMThread::execute(&vm_op);
+}
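+
+// Illustrative note (names from this patch): destroy_tenant_allocation_context()
+// in g1CollectedHeap.cpp simply does `delete context`, and the destructor above
+// forwards the actual cleanup to the VM thread via
+// DestroyG1TenantAllocationContextOperation, so it must not run at a safepoint.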
+
+void G1TenantAllocationContext::inc_occupied_heap_region_count() {
+ assert(TenantHeapIsolation && occupied_heap_region_count() >= 0, "pre-condition");
+ assert(Heap_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint(), "not locked");
+ Atomic::inc_ptr(&_occupied_heap_region_count);
+ assert(occupied_heap_region_count() >= 1, "post-condition");
+}
+
+void G1TenantAllocationContext::dec_occupied_heap_region_count() {
+ assert(TenantHeapIsolation && occupied_heap_region_count() >= 1, "pre-condition");
+ assert(Heap_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint(), "not locked");
+ Atomic::dec_ptr(&_occupied_heap_region_count);
+ assert(occupied_heap_region_count() >= 0, "post-condition");
+}
+
+G1TenantAllocationContext* G1TenantAllocationContext::current() {
+ assert(TenantHeapIsolation, "pre-condition");
+
+ Thread* thrd = Thread::current();
+ assert(NULL != thrd, "Failed to get current thread");
+ return thrd->allocation_context().tenant_allocation_context();
+}
+
+size_t G1TenantAllocationContext::heap_bytes_to_region_num(size_t size_in_bytes) {
+ return heap_words_to_region_num(size_in_bytes >> LogBytesPerWord);
+}
+
+size_t G1TenantAllocationContext::heap_words_to_region_num(size_t size_in_words) {
+ assert(TenantHeapIsolation, "pre-condition");
+ return align_size_up_(size_in_words, HeapRegion::GrainWords) / HeapRegion::GrainWords;
+}
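+
+// Worked example (illustrative): with 2 MB regions on a 64-bit VM, GrainWords
+// is 2M / 8 = 262144 words; a 5 MB limit is 655360 words, which rounds up to
+// ceil(2.5) = 3 regions.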
+
+//--------------------- G1TenantAllocationContexts ---------------------
+G1TenantAllocationContexts::G1TenantACList* G1TenantAllocationContexts::_contexts = NULL;
+
+Mutex* G1TenantAllocationContexts::_list_lock = NULL;
+
+void G1TenantAllocationContexts::add(G1TenantAllocationContext* tac) {
+ assert(TenantHeapIsolation, "pre-condition");
+ if (NULL != tac) {
+ MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
+ _contexts->append(tac);
+ }
+}
+
+void G1TenantAllocationContexts::remove(G1TenantAllocationContext* tac) {
+ assert(TenantHeapIsolation, "pre-condition");
+ if (NULL != tac) {
+ MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
+ _contexts->remove(tac);
+ }
+}
+
+
+long G1TenantAllocationContexts::active_context_count() {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(NULL != "_contexts", "Tenant alloc context list not initialized");
+ MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
+ return _contexts->length();
+}
+
+void G1TenantAllocationContexts::iterate(G1TenantAllocationContextClosure* closure) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(NULL != closure, "NULL closure pointer");
+
+ MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag);
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ closure->do_tenant_allocation_context(*itr);
+ }
+}
+
+void G1TenantAllocationContexts::initialize() {
+ assert(TenantHeapIsolation, "pre-condition");
+ _contexts = new (ResourceObj::C_HEAP, mtTenant) G1TenantACList(128, true, mtTenant);
+ _list_lock = new Mutex(Mutex::leaf, "G1TenantAllocationContext list lock", true /* allow_vm_block */);
+}
+
+void G1TenantAllocationContexts::prepare_for_compaction() {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert_at_safepoint(true /* in vm thread */);
+
+ // no locking needed
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ assert(NULL != (*itr), "pre-condition");
+ (*itr)->_ccp.reset();
+ }
+}
+
+void G1TenantAllocationContexts::oops_do(OopClosure* f) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert(NULL != f, "OopClosure pointer is NULL");
+
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ assert(NULL != (*itr), "pre-condition");
+ f->do_oop(&((*itr)->_tenant_container));
+ }
+}
+
+void G1TenantAllocationContexts::init_mutator_alloc_regions() {
+ assert(TenantHeapIsolation, "pre-condition");
+
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ assert(NULL != (*itr), "pre-condition");
+ MutatorAllocRegion& mar = (*itr)->_mutator_alloc_region;
+ assert(mar.get() == NULL, "pre-condition");
+ mar.init();
+ }
+}
+
+void G1TenantAllocationContexts::release_mutator_alloc_regions() {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert_at_safepoint(true /* in vm thread */);
+
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ assert(NULL != (*itr), "pre-condition");
+ MutatorAllocRegion& mar = (*itr)->_mutator_alloc_region;
+ mar.release();
+ assert(mar.get() == NULL, "pre-condition");
+ }
+}
+
+size_t G1TenantAllocationContexts::total_used() {
+ assert(TenantHeapIsolation, "pre-condition");
+
+ size_t res = 0;
+ MutexLockerEx ml(_list_lock, Monitor::_no_safepoint_check_flag); // take the lock; we may not be at a safepoint
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ assert(NULL != (*itr), "pre-condition");
+ HeapRegion* hr = (*itr)->_mutator_alloc_region.get();
+ if (NULL != hr) {
+ res += hr->used();
+ }
+ }
+
+ return res;
+}
+
+void G1TenantAllocationContexts::init_gc_alloc_regions(G1Allocator* allocator, EvacuationInfo& ei) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert_at_safepoint(true /* in vm thread */);
+ assert(NULL != allocator, "Allocator cannot be NULL");
+
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ G1TenantAllocationContext* tac = (*itr);
+ assert(NULL != tac, "pre-condition");
+
+ SurvivorGCAllocRegion& survivor_region = tac->_survivor_gc_alloc_region;
+ OldGCAllocRegion& old_region = tac->_old_gc_alloc_region;
+
+ survivor_region.init();
+ old_region.init();
+
+ allocator->reuse_retained_old_region(ei, &old_region,
+ &(tac->_retained_old_gc_alloc_region));
+ }
+}
+
+void G1TenantAllocationContexts::release_gc_alloc_regions(EvacuationInfo& ei) {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert_at_safepoint(true /* in vm thread */);
+
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ G1TenantAllocationContext* tac = (*itr);
+ assert(NULL != tac, "pre-condition");
+
+ SurvivorGCAllocRegion& survivor_region = tac->_survivor_gc_alloc_region;
+ OldGCAllocRegion& old_region = tac->_old_gc_alloc_region;
+
+ ei.increment_allocation_regions(survivor_region.count() + old_region.count());
+
+ survivor_region.release();
+ HeapRegion* retained_old = old_region.release();
+
+ tac->set_retained_old_gc_alloc_region(retained_old);
+ if (NULL != tac->retained_old_gc_alloc_region()) {
+ tac->retained_old_gc_alloc_region()->record_retained_region();
+ }
+ }
+}
+
+void G1TenantAllocationContexts::abandon_gc_alloc_regions() {
+ assert(TenantHeapIsolation, "pre-condition");
+ assert_at_safepoint(true /* in vm thread */);
+
+ for (G1TenantACListIterator itr = _contexts->begin();
+ itr != _contexts->end(); ++itr) {
+ G1TenantAllocationContext* tac = *itr;
+ assert(NULL != tac, "pre-condition");
+ assert(NULL == tac->_survivor_gc_alloc_region.get(), "pre-condition");
+ assert(NULL == tac->_old_gc_alloc_region.get(), "pre-condition");
+ (*itr)->set_retained_old_gc_alloc_region(NULL);
+ }
+}
+
+G1TenantAllocationContext* G1TenantAllocationContexts::system_context() {
+ assert(TenantHeapIsolation, "pre-condition");
+ return AllocationContext::system().tenant_allocation_context();
+}
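
A minimal usage sketch for the iteration API above (illustrative only, not part of the patch; TotalLimitClosure is a hypothetical name):

    // Sum the per-tenant heap limits over all active contexts.
    class TotalLimitClosure : public G1TenantAllocationContextClosure {
      size_t _total;
    public:
      TotalLimitClosure() : _total(0) {}
      virtual void do_tenant_allocation_context(G1TenantAllocationContext* tac) {
        _total += tac->heap_size_limit();
      }
      size_t total() const { return _total; }
    };
    // TotalLimitClosure cl;
    // G1TenantAllocationContexts::iterate(&cl);  // iterate() takes _list_lock itself
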
diff --git a/src/share/vm/gc_implementation/g1/g1TenantAllocationContext.hpp b/src/share/vm/gc_implementation/g1/g1TenantAllocationContext.hpp
new file mode 100644
index 0000000000000000000000000000000000000000..aa8bbb48c2a0952390a21332e75f78144b1102dd
--- /dev/null
+++ b/src/share/vm/gc_implementation/g1/g1TenantAllocationContext.hpp
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_TENANT_CONTEXT_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_G1_TENANT_CONTEXT_HPP
+
+#include "gc_implementation/g1/g1AllocationContext.hpp"
+#include "gc_implementation/g1/g1AllocRegion.hpp"
+#include "gc_implementation/g1/heapRegionSet.hpp"
+#include "gc_implementation/g1/g1Allocator.hpp"
+#include "memory/allocation.hpp"
+#include "memory/iterator.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/vm_operations.hpp"
+
+class OopClosure;
+class G1TenantAllocationContext;
+class G1TenantAllocationContexts;
+
+/*
+ * Closure to encapsulate operations to iterate over all G1TenantAllocationContext
+ */
+class G1TenantAllocationContextClosure : public Closure {
+public:
+ virtual void do_tenant_allocation_context(G1TenantAllocationContext*) = 0;
+};
+
+// By default, no limit on newly created G1TenantAllocationContext
+#define TENANT_HEAP_NO_LIMIT 0
+
+/*
+ * G1TenantAllocationContext identifies a group of isolated Java heap regions associated with
+ * one TenantContainer.
+ *
+ * Only valid when -XX:+TenantHeapIsolation enabled
+ *
+ */
+class G1TenantAllocationContext : public CHeapObj<mtTenant> {
+ friend class VMStructs;
+ friend class G1TenantAllocationContexts;
+private:
+ G1CollectedHeap* _g1h; // The only g1 heap instance
+
+ // Memory allocation related
+ MutatorAllocRegion _mutator_alloc_region; // mutator regions from young list
+ SurvivorGCAllocRegion _survivor_gc_alloc_region; // survivor region used during GC
+ OldGCAllocRegion _old_gc_alloc_region; // Old region used during GC
+ HeapRegion* _retained_old_gc_alloc_region; // the retained old region for this tenant
+
+ // HeapRegion throttling related
+ size_t _heap_size_limit; // user-defined max heap space for this tenant, in bytes
+ size_t _heap_region_limit; // user-defined max heap space for this tenant, in heap regions
+ size_t _occupied_heap_region_count; // number of regions occupied by this tenant
+
+ // Tenant alloc context list is now part of root set since each node
+ // keeps a strong reference to TenantContainer object for containerOf() API
+ oop _tenant_container; // handle to tenant container object
+
+ CachedCompactPoint _ccp; // cached CompactPoint during full GC compaction
+
+public:
+ // Newly allocated G1TenantAllocationContext will be put at the head of tenant alloc context list
+ G1TenantAllocationContext(G1CollectedHeap* g1h);
+ virtual ~G1TenantAllocationContext();
+
+ MutatorAllocRegion* mutator_alloc_region() { return &_mutator_alloc_region; }
+ SurvivorGCAllocRegion* survivor_gc_alloc_region() { return &_survivor_gc_alloc_region; }
+ OldGCAllocRegion* old_gc_alloc_region() { return &_old_gc_alloc_region; }
+ HeapRegion* retained_old_gc_alloc_region() { return _retained_old_gc_alloc_region; }
+ void set_retained_old_gc_alloc_region(HeapRegion* hr) { _retained_old_gc_alloc_region = hr; }
+
+ // Get and set tenant container handle
+ oop tenant_container() const { return _tenant_container; }
+ void set_tenant_container(oop handle) { _tenant_container = handle; }
+
+ // get/set heap size limit
+ size_t heap_size_limit() const { return _heap_size_limit; }
+ void set_heap_size_limit(size_t new_size);
+
+ // get heap region limit, the size is calculated automatically
+ size_t heap_region_limit() const { return _heap_region_limit; }
+
+ // set/get occupied heap region count
+ void inc_occupied_heap_region_count();
+ void dec_occupied_heap_region_count();
+ size_t occupied_heap_region_count() { return _occupied_heap_region_count; }
+
+ // record the compact dest
+ const CachedCompactPoint& cached_compact_point() const { return _ccp; }
+ void set_cached_compact_point(CompactPoint cp) { _ccp = cp; }
+
+ // Retrieve pointer to current tenant context, NULL if in root container
+ static G1TenantAllocationContext* current();
+
+private:
+ // Calculate how many regions a given size occupies:
+ // returns 1 if 0 < size < HeapRegion::GrainWords (resp. GrainBytes), and 0 if size == 0
+ static size_t heap_bytes_to_region_num(size_t size_in_bytes);
+ static size_t heap_words_to_region_num(size_t size_in_words);
+};
+
+// To encapsulate operations for all existing tenants
+class G1TenantAllocationContexts : public AllStatic {
+ friend class VMStructs;
+public:
+ typedef GrowableArray<G1TenantAllocationContext*> G1TenantACList;
+ typedef GrowableArrayIterator<G1TenantAllocationContext*> G1TenantACListIterator;
+
+private:
+ // NOTE: the two objects below are created on the C heap and never released
+ // during the JVM's lifetime. This is on purpose: they must exist as long
+ // as the Java heap exists, and the Java heap is not destroyed until the
+ // JVM dies.
+ static G1TenantACList *_contexts; // list of all active tenant allocation contexts
+ static Mutex *_list_lock;
+
+public:
+ static void add(G1TenantAllocationContext*);
+ static void remove(G1TenantAllocationContext*);
+
+ // initialize shared data
+ static void initialize();
+
+ // Get total number of active tenant containers
+ static long active_context_count();
+
+ // Perform operation upon all tenant alloc contexts
+ static void iterate(G1TenantAllocationContextClosure* closure);
+
+ // Prepare for full GC compaction
+ static void prepare_for_compaction();
+
+ // GC support, we keep a reference to the TenantContainer oop
+ static void oops_do(OopClosure* f);
+
+ static void init_mutator_alloc_regions();
+ static void release_mutator_alloc_regions();
+
+ static size_t total_used();
+
+ static void init_gc_alloc_regions(G1Allocator* allocator, EvacuationInfo& ei);
+ static void release_gc_alloc_regions(EvacuationInfo& ei);
+
+ static void abandon_gc_alloc_regions();
+
+ static G1TenantAllocationContext* system_context();
+};
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_G1_TENANT_CONTEXT_HPP
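
The region-count arithmetic declared above rounds a byte size up to whole heap regions. A standalone sketch of the same computation (grain size assumed for illustration; not patch code):

    #include <cstddef>
    // Mirrors align_size_up_(words, GrainWords) / GrainWords: round-up division.
    static size_t bytes_to_regions(size_t bytes, size_t grain_bytes) {
      return (bytes + grain_bytes - 1) / grain_bytes;
    }
    // With 2M regions: bytes_to_regions(5u << 20, 2u << 20) == 3,
    // and bytes_to_regions(0, 2u << 20) == 0, matching TENANT_HEAP_NO_LIMIT.
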
diff --git a/src/share/vm/gc_implementation/g1/g1_globals.hpp b/src/share/vm/gc_implementation/g1/g1_globals.hpp
index e24cc9594334bfb740cdfd9160635dce4cc14b5c..618c39b8b6abaaf3484206eec40b9ecb6a4f8c18 100644
--- a/src/share/vm/gc_implementation/g1/g1_globals.hpp
+++ b/src/share/vm/gc_implementation/g1/g1_globals.hpp
@@ -328,7 +328,13 @@
"Verify the code root lists attached to each heap region.") \
\
develop(bool, G1VerifyBitmaps, false, \
- "Verifies the consistency of the marking bitmaps")
+ "Verifies the consistency of the marking bitmaps") \
+ \
+ develop(bool, TraceG1TenantAllocationContext, false, \
+ "Trace tenant allocation context changes") \
+ \
+ develop(bool, TraceNonRootTenantAllocation, false, \
+ "Trace memory allocated for non-root tenants") \
G1_FLAGS(DECLARE_DEVELOPER_FLAG, DECLARE_PD_DEVELOPER_FLAG, DECLARE_PRODUCT_FLAG, DECLARE_PD_PRODUCT_FLAG, DECLARE_DIAGNOSTIC_FLAG, DECLARE_EXPERIMENTAL_FLAG, DECLARE_NOTPRODUCT_FLAG, DECLARE_MANAGEABLE_FLAG, DECLARE_PRODUCT_RW_FLAG)
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.cpp b/src/share/vm/gc_implementation/g1/heapRegion.cpp
index b3f1ea0a798888883d67f89dd8ff58b5ca19bc92..510a6fe01c799deed952f8f128770611a49ef03d 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.cpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp
@@ -319,7 +319,26 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
CompactibleSpace* HeapRegion::next_compaction_space() const {
- return G1CollectedHeap::heap()->next_compaction_region(this);
+ if (TenantHeapIsolation) {
+ assert_at_safepoint(true /* in vm thread */);
+
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+ assert(NULL != g1h, "g1h cannot be NULL");
+ HeapRegion* hr = g1h->next_compaction_region(this);
+ while (NULL != hr) {
+ assert(!hr->isHumongous(), "just checking");
+ if (hr->allocation_context() == allocation_context()) {
+ return hr;
+ }
+ hr = g1h->next_compaction_region(hr);
+ }
+ // The worst case is to return 'this', so hr cannot be NULL here
+ assert(NULL != hr, "post-condition");
+ return hr;
+
+ } else {
+ return G1CollectedHeap::heap()->next_compaction_region(this);
+ }
}
void HeapRegion::note_self_forwarding_removal_start(bool during_initial_mark,
@@ -642,7 +661,15 @@ void HeapRegion::verify_strong_code_roots(VerifyOption vo, bool* failures) const
void HeapRegion::print() const { print_on(gclog_or_tty); }
void HeapRegion::print_on(outputStream* st) const {
- st->print("AC%4u", allocation_context());
+ if (TenantHeapIsolation) {
+ if (NULL == tenant_allocation_context()) {
+ st->print(" TENANT-ROOT");
+ } else {
+ assert(!allocation_context().is_system(), "Inconsistent allocation contexts");
+ st->print(" TENANT-" PTR_FORMAT, allocation_context().tenant_allocation_context());
+ }
+ }
+ st->print(" AC%4u", allocation_context().allocation_context());
st->print(" %2s", get_short_type_str());
if (in_collection_set())
st->print(" CS");
@@ -989,6 +1016,29 @@ void HeapRegion::verify(VerifyOption vo,
verify_strong_code_roots(vo, failures);
}
+
+
+void HeapRegion::set_allocation_context(AllocationContext_t context) {
+ assert(Heap_lock->owned_by_self() || SafepointSynchronize::is_at_safepoint(), "not locked");
+ if (TenantHeapIsolation && context != allocation_context() /* do not count self-set */) {
+ if (context.is_system()) {
+ assert(!allocation_context().is_system(), "pre-condition");
+ G1TenantAllocationContext* tac = allocation_context().tenant_allocation_context();
+ assert(NULL != tac, "pre-condition");
+ tac->dec_occupied_heap_region_count();
+ } else {
+ assert(allocation_context().is_system(), "pre-condition");
+ G1TenantAllocationContext* tac = context.tenant_allocation_context();
+ assert(NULL != tac, "pre-condition");
+ tac->inc_occupied_heap_region_count();
+ }
+ } else {
+ DEBUG_ONLY(assert(!TenantHeapIsolation
+ || (context.is_system() && allocation_context().is_system()), "just checking"));
+ }
+ _allocation_context = context;
+}
+
void HeapRegion::verify() const {
bool dummy = false;
verify(VerifyOption_G1UsePrevMarking, /* failures */ &dummy);
diff --git a/src/share/vm/gc_implementation/g1/heapRegion.hpp b/src/share/vm/gc_implementation/g1/heapRegion.hpp
index 9e77ecee7711523552a21d647b3edb4b0e6e6ad6..a03d214e68a24c9a517dad81a220c5de0bea76bc 100644
--- a/src/share/vm/gc_implementation/g1/heapRegion.hpp
+++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp
@@ -350,6 +350,12 @@ class HeapRegion: public G1OffsetTableContigSpace {
// All allocated blocks are occupied by objects in a HeapRegion
bool block_is_obj(const HeapWord* p) const;
+ // Get and set tenant allocation context of this heap region
+ const G1TenantAllocationContext* tenant_allocation_context() const {
+ assert(TenantHeapIsolation, "pre-condition");
+ return allocation_context().tenant_allocation_context();
+ }
+
// Returns the object size for all valid block starts
// and the amount of unallocated words if called on top()
size_t block_size(const HeapWord* p) const;
@@ -512,11 +518,9 @@ class HeapRegion: public G1OffsetTableContigSpace {
_next_in_special_set = r;
}
- void set_allocation_context(AllocationContext_t context) {
- _allocation_context = context;
- }
+ void set_allocation_context(AllocationContext_t context);
- AllocationContext_t allocation_context() const {
+ const AllocationContext_t& allocation_context() const {
return _allocation_context;
}
diff --git a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
index 00238e177f9b44427bdcf4dc0f6c551963e503f3..2573cdc239e34012d016eac745bba1e385ec072b 100644
--- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
+++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp
@@ -28,6 +28,7 @@
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "gc_implementation/g1/g1TenantAllocationContext.hpp"
#define VM_STRUCTS_G1(nonstatic_field, static_field) \
\
@@ -64,6 +65,10 @@
\
nonstatic_field(HeapRegionSetCount, _length, uint) \
nonstatic_field(HeapRegionSetCount, _capacity, size_t) \
+ nonstatic_field(G1TenantAllocationContext, _heap_size_limit, size_t) \
+ nonstatic_field(G1TenantAllocationContext, _heap_region_limit, size_t) \
+ nonstatic_field(G1TenantAllocationContext, _occupied_heap_region_count, size_t) \
+ nonstatic_field(G1TenantAllocationContext, _tenant_container, oop) \
#define VM_TYPES_G1(declare_type, declare_toplevel_type) \
@@ -84,6 +89,8 @@
declare_toplevel_type(HeapRegion*) \
declare_toplevel_type(G1MonitoringSupport*) \
declare_toplevel_type(G1Allocator*) \
+ declare_toplevel_type(G1TenantAllocationContext) \
+ declare_toplevel_type(G1TenantAllocationContext*) \
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP
diff --git a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
index 47267f21d5380345f479233e1f5b3a90eba232f4..a5b10b07b5df71d1a755d22b084ef18413998421 100644
--- a/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
+++ b/src/share/vm/gc_implementation/g1/vm_operations_g1.cpp
@@ -94,6 +94,8 @@ void VM_G1IncCollectionPause::doit() {
"only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");
if (_word_size > 0) {
+ AllocationContextMark acm(this->allocation_context());
+
// An allocation has been requested. So, try to do that first.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
false /* expect_null_cur_alloc_region */);
@@ -147,6 +149,7 @@ void VM_G1IncCollectionPause::doit() {
_pause_succeeded =
g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
if (_pause_succeeded && _word_size > 0) {
+ AllocationContextMark acm(this->allocation_context());
// An allocation had been requested.
_result = g1h->attempt_allocation_at_safepoint(_word_size, allocation_context(),
true /* expect_null_cur_alloc_region */);
diff --git a/src/share/vm/gc_interface/collectedHeap.cpp b/src/share/vm/gc_interface/collectedHeap.cpp
index 7b64c0273eed42c61eff0c61dfbf1144e21cce5b..535b6b8ca0747c6562abc3399e09b5d36fd1dba2 100644
--- a/src/share/vm/gc_interface/collectedHeap.cpp
+++ b/src/share/vm/gc_interface/collectedHeap.cpp
@@ -525,7 +525,16 @@ void CollectedHeap::ensure_parsability(bool retire_tlabs) {
"Attempt to fill tlabs before main thread has been added"
" to threads list is doomed to failure!");
for (JavaThread *thread = Threads::first(); thread; thread = thread->next()) {
- if (use_tlab) thread->tlab().make_parsable(retire_tlabs);
+ if (use_tlab) {
+ if (UsePerTenantTLAB) {
+ thread->make_all_tlabs_parsable(retire_tlabs,
+ // do not delete saved TLABs, so that the per-thread +
+ // per-tenant TLAB adaptive size policy stays in effect
+ false /* delete saved TLABs */);
+ } else {
+ thread->tlab().make_parsable(retire_tlabs);
+ }
+ }
#ifdef COMPILER2
// The deferred store barriers must all have been flushed to the
// card-table (or other remembered set structure) before GC starts
diff --git a/src/share/vm/memory/allocation.hpp b/src/share/vm/memory/allocation.hpp
index aa8f02d09e16d92f2d64ecbed0127ab4f67860ad..6112a0ae64b5bc5767b857161d6ba3e1223f37df 100644
--- a/src/share/vm/memory/allocation.hpp
+++ b/src/share/vm/memory/allocation.hpp
@@ -154,8 +154,9 @@ enum MemoryType {
mtChunk = 0x0C, // chunk that holds content of arenas
mtTest = 0x0D, // Test type for verifying NMT
mtTracing = 0x0E, // memory used for Tracing
- mtNone = 0x0F, // undefined
- mt_number_of_types = 0x10 // number of memory types (mtDontTrack
+ mtTenant = 0x0F, // memory used by MultiTenant code
+ mtNone = 0x10, // undefined
+ mt_number_of_types = 0x11 // number of memory types (mtDontTrack
// is not included as validate type)
};
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
index 317384f23f75a1b9adbee98235e76df9d6298426..cae52b24583659be63a9dfc7900ee687981c75fc 100644
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -45,6 +45,27 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+CompactPoint& CompactPoint::operator = (const CompactPoint& cp) {
+ space = cp.space;
+ threshold = cp.threshold;
+ gen = cp.gen;
+ return *this;
+}
+
+CompactPoint& CompactPoint::operator = (const CachedCompactPoint& ccp) {
+ space = ccp.space;
+ threshold = ccp.threshold;
+ return *this;
+}
+
+CachedCompactPoint& CachedCompactPoint::operator = (const CompactPoint& cp) {
+ space = cp.space;
+ threshold = cp.threshold;
+ return *this;
+}
+#endif
+
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
diff --git a/src/share/vm/memory/space.hpp b/src/share/vm/memory/space.hpp
index a4c3f9e3d4df5f9c6e217c90af367f5bc927177f..819e87739209b6953f85b2ab47a53715f102308d 100644
--- a/src/share/vm/memory/space.hpp
+++ b/src/share/vm/memory/space.hpp
@@ -323,6 +323,10 @@ public:
#endif
};
+#if INCLUDE_ALL_GCS
+class CachedCompactPoint;
+#endif
+
// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
@@ -333,8 +337,29 @@ public:
CompactPoint(Generation* g = NULL) :
gen(g), space(NULL), threshold(0) {}
+#if INCLUDE_ALL_GCS
+ CompactPoint& operator = (const CompactPoint&);
+ CompactPoint& operator = (const CachedCompactPoint&);
+#endif
+};
+
+#if INCLUDE_ALL_GCS
+
+// To cache CompactPoint info on C heap
+class CachedCompactPoint : public CHeapObj<mtTenant> {
+public:
+ // Generation* is not used by G1, thus no need to cache it
+ CompactibleSpace* space; // cached space
+ HeapWord* threshold; // cached threshold
+
+public:
+ CachedCompactPoint() : space(NULL), threshold(NULL) {}
+ void reset() { space = NULL; threshold = NULL; }
+ CachedCompactPoint& operator = (const CompactPoint&);
};
+#endif
+
// A space that supports compaction operations. This is usually, but not
// necessarily, a space that is normally contiguous. But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
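
An illustrative round-trip through the assignment operators above (a sketch; assumes cp was populated by a G1 full-GC compaction phase):

    CompactPoint cp;          // assume: filled in during compaction preparation
    CachedCompactPoint ccp;   // C-heap object, survives across GC phases
    ccp = cp;                 // caches space + threshold ('gen' is not cached)
    CompactPoint cp2;
    cp2 = ccp;                // restores space + threshold; cp2.gen stays NULL, unused by G1
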
diff --git a/src/share/vm/memory/threadLocalAllocBuffer.cpp b/src/share/vm/memory/threadLocalAllocBuffer.cpp
index ecb2ab7fc2c6c14d831967dc8b58c74e84c7a3b8..9cb790a0ae05cffc40d8c307738c190fe4f03d7c 100644
--- a/src/share/vm/memory/threadLocalAllocBuffer.cpp
+++ b/src/share/vm/memory/threadLocalAllocBuffer.cpp
@@ -209,6 +209,12 @@ void ThreadLocalAllocBuffer::initialize() {
_allocation_fraction.sample(alloc_frac);
}
+#if INCLUDE_ALL_GCS
+ if (UsePerTenantTLAB) {
+ _my_thread = Thread::current();
+ }
+#endif
+
set_refill_waste_limit(initial_refill_waste_limit());
initialize_statistics();
@@ -287,11 +293,39 @@ void ThreadLocalAllocBuffer::verify() {
}
Thread* ThreadLocalAllocBuffer::myThread() {
+#if INCLUDE_ALL_GCS
+ if (UsePerTenantTLAB) {
+ return _my_thread;
+ }
+#endif
return (Thread*)(((char *)this) +
in_bytes(start_offset()) -
in_bytes(Thread::tlab_start_offset()));
}
+#if INCLUDE_ALL_GCS
+
+void ThreadLocalAllocBuffer::swap_content(ThreadLocalAllocBuffer* peer) {
+ assert(UseG1GC && TenantHeapIsolation
+ && UsePerTenantTLAB && peer != NULL, "sanity");
+ assert(peer->myThread() == this->myThread()
+ && (Thread::current() == this->myThread() || SafepointSynchronize::is_at_safepoint()),
+ "only for self thread");
+
+ // do swapping
+ unsigned char buf[sizeof(ThreadLocalAllocBuffer)];
+ memcpy(buf, this, sizeof(ThreadLocalAllocBuffer));
+ memcpy(this, peer, sizeof(ThreadLocalAllocBuffer));
+ memcpy(peer, buf, sizeof(ThreadLocalAllocBuffer));
+
+ // restore linkage info
+ ThreadLocalAllocBuffer* tmp_next = this->next();
+ this->set_next(peer->next());
+ peer->set_next(tmp_next);
+}
+
+#endif // #if INCLUDE_ALL_GCS
+
GlobalTLABStats::GlobalTLABStats() :
_allocating_threads_avg(TLABAllocationWeight) {
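
swap_content() above exchanges every field via memcpy and then swaps the 'next' pointers back, so list linkage stays with the list position rather than with the TLAB contents. The same idiom on a plain struct (a sketch, not HotSpot code):

    struct Node { int payload[4]; Node* next; };
    static void swap_content(Node* a, Node* b) {
      Node tmp = *a;        // swap everything, including 'next'
      *a = *b;
      *b = tmp;
      Node* t = a->next;    // then swap 'next' back: linkage stays in place
      a->next = b->next;
      b->next = t;
    }
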
diff --git a/src/share/vm/memory/threadLocalAllocBuffer.hpp b/src/share/vm/memory/threadLocalAllocBuffer.hpp
index 07308ff7ed33b7119ca2d63595b171127cd7e209..562d640b1f7b6b86d5a2a5f9274d02dd88f80820 100644
--- a/src/share/vm/memory/threadLocalAllocBuffer.hpp
+++ b/src/share/vm/memory/threadLocalAllocBuffer.hpp
@@ -31,6 +31,10 @@
class GlobalTLABStats;
+#if INCLUDE_ALL_GCS
+class G1TenantAllocationContext;
+#endif
+
// ThreadLocalAllocBuffer: a descriptor for thread-local storage used by
// the threads for allocation.
// It is thread-private at any time, but maybe multiplexed over
@@ -101,7 +105,14 @@ private:
static GlobalTLABStats* global_stats() { return _global_stats; }
public:
- ThreadLocalAllocBuffer() : _allocation_fraction(TLABAllocationWeight), _allocated_before_last_gc(0) {
+ ThreadLocalAllocBuffer()
+ : _allocation_fraction(TLABAllocationWeight),
+#if INCLUDE_ALL_GCS
+ _next(NULL),
+ _tenant_context(NULL),
+ _my_thread(NULL),
+#endif // #if INCLUDE_ALL_GCS
+ _allocated_before_last_gc(0) {
// do nothing. tlabs must be inited by initialize() calls
}
@@ -173,6 +184,31 @@ public:
static ByteSize slow_allocations_offset() { return byte_offset_of(ThreadLocalAllocBuffer, _slow_allocations ); }
void verify();
+
+#if INCLUDE_ALL_GCS
+private:
+ // retained TLAB support
+ G1TenantAllocationContext* _tenant_context; // Tenant allocation context associated with this TLAB
+ ThreadLocalAllocBuffer* _next; // linked to next TLAB
+
+ //
+ // This is a little redundant, but we have to keep a pointer to the owner thread
+ // when -XX:+UsePerTenantTLAB is specified. In that case, a TLAB object may be
+ // allocated directly on the C heap, instead of being embedded in the Thread
+ // object, which means the owner Thread address can no longer be calculated as
+ // 'cur_tlab->_start + Thread::tlab_start_offset()'.
+ //
+ Thread* _my_thread;
+
+public:
+ G1TenantAllocationContext* tenant_allocation_context() const { return _tenant_context; }
+ void set_tenant_allocation_context(G1TenantAllocationContext* context) { _tenant_context = context; }
+ // linked-list support for retained per-tenant TLABs
+ ThreadLocalAllocBuffer* next() const { return _next; }
+ void set_next(ThreadLocalAllocBuffer* next) { _next = next; }
+ // swap contents with another TLAB
+ void swap_content(ThreadLocalAllocBuffer* peer);
+#endif // #if INCLUDE_ALL_GCS
};
class GlobalTLABStats: public CHeapObj<mtThread> {
diff --git a/src/share/vm/prims/jvm.cpp b/src/share/vm/prims/jvm.cpp
index b23917a750e01db7a0f859f0ea52cdee69010613..7108baac3f5481bc9fce597a1b003d0b5d85b440 100644
--- a/src/share/vm/prims/jvm.cpp
+++ b/src/share/vm/prims/jvm.cpp
@@ -3876,13 +3876,51 @@ JVM_END
/***************** Tenant support ************************************/
-JVM_ENTRY(void, JVM_AttachToTenant(JNIEnv *env, jobject tenant))
+JVM_ENTRY(jobject, JVM_TenantContainerOf(JNIEnv* env, jclass tenantContainerClass, jobject obj))
+ JVMWrapper("JVM_TenantContainerOf");
+ assert(MultiTenant && TenantHeapIsolation, "pre-condition");
+ if (NULL != obj) {
+ oop container = G1CollectedHeap::heap()->tenant_container_of(JNIHandles::resolve_non_null(obj));
+ if (container != NULL) {
+ return JNIHandles::make_local(env, container);
+ }
+ }
+ return NULL;
+JVM_END
+
+JVM_ENTRY(void, JVM_AttachToTenant(JNIEnv *env, jobject ignored, jobject tenant))
JVMWrapper("JVM_AttachToTenant");
assert(MultiTenant, "pre-condition");
assert (NULL != thread, "no current thread!");
thread->set_tenantObj(tenant == NULL ? (oop)NULL : JNIHandles::resolve_non_null(tenant));
JVM_END
+JVM_ENTRY(void, JVM_CreateTenantAllocationContext(JNIEnv *env, jobject ignored, jobject tenant, jlong heapLimit))
+ JVMWrapper("JVM_CreateTenantAllocationContext");
+ guarantee(UseG1GC && TenantHeapIsolation, "pre-condition");
+ oop tenant_obj = JNIHandles::resolve_non_null(tenant);
+ assert(tenant_obj != NULL, "Cannot create allocation context for a null tenant container");
+ G1CollectedHeap::heap()->create_tenant_allocation_context(tenant_obj);
+JVM_END
+
+// This method should be called before reclaiming of Java TenantContainer object
+JVM_ENTRY(void, JVM_DestroyTenantAllocationContext(JNIEnv *env, jobject ignored, jlong context))
+ JVMWrapper("JVM_DestroyTenantAllocationContext");
+ assert(UseG1GC && TenantHeapIsolation, "pre-condition");
+ oop tenant_obj = ((G1TenantAllocationContext*)context)->tenant_container();
+ assert(tenant_obj != NULL, "Cannot destroy allocation context from a null tenant container");
+ G1CollectedHeap::heap()->destroy_tenant_allocation_context(context);
+JVM_END
+
+JVM_ENTRY(jlong, JVM_GetTenantOccupiedMemory(JNIEnv* env, jobject ignored, jlong context))
+ JVMWrapper("JVM_GetTenantOccupiedMemory");
+ assert(UseG1GC && TenantHeapIsolation, "pre-condition");
+ G1TenantAllocationContext* alloc_context = (G1TenantAllocationContext*)context;
+ assert(alloc_context != NULL, "Bad allocation context!");
+ assert(alloc_context->tenant_container() != NULL, "NULL tenant container");
+ return (alloc_context->occupied_heap_region_count() * HeapRegion::GrainBytes);
+JVM_END
+
// Array ///////////////////////////////////////////////////////////////////////////////////////////
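
A sketch of the expected call sequence for the entry points above (local names are illustrative; the real caller is the TenantContainer native code in the JDK):

    // jobject tenant = ...;  // a com.alibaba.tenant.TenantContainer instance
    // JVM_CreateTenantAllocationContext(env, NULL, tenant, 512L << 20 /* heapLimit */);
    // jlong ctx = ...;       // context pointer read back from the TenantContainer field
    // jlong used = JVM_GetTenantOccupiedMemory(env, NULL, ctx);  // regions * GrainBytes
    // JVM_DestroyTenantAllocationContext(env, NULL, ctx);  // before the object is reclaimed
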
diff --git a/src/share/vm/prims/jvm.h b/src/share/vm/prims/jvm.h
index a1a3dd72f17113728a04c09fa57e5c6bf5f315ac..d694d4bfbbca2fdc0426fc4c891ea7b2bc4dad54 100644
--- a/src/share/vm/prims/jvm.h
+++ b/src/share/vm/prims/jvm.h
@@ -361,7 +361,19 @@ JVM_LoadClass0(JNIEnv *env, jobject obj, jclass currClass,
* com.alibaba.tenant.TenantContainer
*/
JNIEXPORT void JNICALL
-JVM_AttachToTenant(JNIEnv *env, jobject tenant);
+JVM_AttachToTenant(JNIEnv *env, jobject ignored, jobject tenant);
+
+JNIEXPORT void JNICALL
+JVM_CreateTenantAllocationContext(JNIEnv *env, jobject ignored, jobject tenant, jlong heapLimit);
+
+JNIEXPORT void JNICALL
+JVM_DestroyTenantAllocationContext(JNIEnv *env, jobject ignored, jlong context);
+
+JNIEXPORT jobject JNICALL
+JVM_TenantContainerOf(JNIEnv *env, jclass tenantContainerClass, jobject obj);
+
+JNIEXPORT jlong JNICALL
+JVM_GetTenantOccupiedMemory(JNIEnv *env, jobject ignored, jlong context);
/*
* java.lang.reflect.Array
diff --git a/src/share/vm/prims/tenantenv.cpp b/src/share/vm/prims/tenantenv.cpp
index e6577b64171035b20818c51adb9c7af5c5066f80..709a205515dbd82a29fb40ecc3dae43fdacc913b 100644
--- a/src/share/vm/prims/tenantenv.cpp
+++ b/src/share/vm/prims/tenantenv.cpp
@@ -28,6 +28,7 @@
*/
#define TENANT_FLAG_MULTI_TENANT_ENABLED (0x1) // bit 0 to indicate if the tenant feature is enabled.
+#define TENANT_FLAG_HEAP_ISOLATION_ENABLED (0x80) // bit 7 to indicate if heap isolation feature is enabled.
static jint tenant_GetTenantFlags(TenantEnv *env, jclass cls);
@@ -49,5 +50,9 @@ tenant_GetTenantFlags(TenantEnv *env, jclass cls)
result |= TENANT_FLAG_MULTI_TENANT_ENABLED;
}
+ if (TenantHeapIsolation) {
+ result |= TENANT_FLAG_HEAP_ISOLATION_ENABLED;
+ }
+
return result;
}
diff --git a/src/share/vm/prims/whitebox.cpp b/src/share/vm/prims/whitebox.cpp
index 4723a1b13e8a49eb2420bc61fbe7f45173109276..eef83824e96ae4efe64543a0a53fb86b5f430b38 100644
--- a/src/share/vm/prims/whitebox.cpp
+++ b/src/share/vm/prims/whitebox.cpp
@@ -1403,6 +1403,17 @@ WB_ENTRY(jboolean, WB_TestFixDanglingPointerInDeopt(JNIEnv* env, jobject o, jstr
}
return true;
WB_END
+
+WB_ENTRY(jboolean, WB_IsInCurrentTLAB(JNIEnv* env, jobject wb, jobject o))
+ ThreadToNativeFromVM ttn(thread);
+ if (o != NULL) {
+ HeapWord* addr = (HeapWord*)JNIHandles::resolve_non_null(o);
+ ThreadLocalAllocBuffer& tlab = Thread::current()->tlab();
+ return (addr >= tlab.start() && addr < tlab.end()) ? JNI_TRUE : JNI_FALSE;
+ }
+ return JNI_FALSE;
+WB_END
+
#define CC (char*)
static JNINativeMethod methods[] = {
@@ -1538,7 +1549,8 @@ static JNINativeMethod methods[] = {
{CC"testFixDanglingPointerInDeopt",
CC"(Ljava/lang/String;)Z", (void*)&WB_TestFixDanglingPointerInDeopt},
{CC"getClassInitOrderList", CC"()[Ljava/lang/String;",
- (void*)&WB_GetClassInitOrderList }
+ (void*)&WB_GetClassInitOrderList },
+ {CC"isInCurrentTLAB", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsInCurrentTLAB },
};
#undef CC
diff --git a/src/share/vm/runtime/arguments_ext.cpp b/src/share/vm/runtime/arguments_ext.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..17a564787672dd819338aa2539f6663e618ab062
--- /dev/null
+++ b/src/share/vm/runtime/arguments_ext.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#include "runtime/arguments_ext.hpp"
+#include "runtime/java.hpp"
+
+void ArgumentsExt::set_tenant_flags() {
+ // TenantHeapIsolation directly depends on MultiTenant, UseG1GC
+ if (TenantHeapIsolation) {
+ if (FLAG_IS_DEFAULT(MultiTenant)) {
+ FLAG_SET_ERGO(bool, MultiTenant, true);
+ }
+ if (UseTLAB && FLAG_IS_DEFAULT(UsePerTenantTLAB)) {
+ // enable per-tenant TLABs if unspecified and heap isolation is enabled
+ FLAG_SET_ERGO(bool, UsePerTenantTLAB, true);
+ }
+
+ // check GC policy compatibility
+ if (!UseG1GC) {
+ vm_exit_during_initialization("-XX:+TenantHeapIsolation only works with -XX:+UseG1GC");
+ }
+ if (!MultiTenant) {
+ vm_exit_during_initialization("Cannot use multi-tenant features if -XX:-MultiTenant specified");
+ }
+ }
+
+ // UsePerTenantTLAB depends on TenantHeapIsolation and UseTLAB
+ if (UsePerTenantTLAB) {
+ if (!TenantHeapIsolation || !UseTLAB) {
+ vm_exit_during_initialization("-XX:+UsePerTenantTLAB only works with -XX:+TenantHeapIsolation and -XX:+UseTLAB");
+ }
+ }
+}
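
Given the checks above, the flag dependencies resolve as follows (illustrative command lines):

    java -XX:+UseG1GC -XX:+TenantHeapIsolation ...
        (MultiTenant, and with TLABs enabled UsePerTenantTLAB, are set ergonomically)
    java -XX:+TenantHeapIsolation -XX:+UseParallelGC ...
        (exits during initialization: TenantHeapIsolation requires -XX:+UseG1GC)
    java -XX:+UsePerTenantTLAB ...
        (exits during initialization unless TenantHeapIsolation and UseTLAB are also on)
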
diff --git a/src/share/vm/runtime/arguments_ext.hpp b/src/share/vm/runtime/arguments_ext.hpp
index 26a311f7cb3ee7a80795e726d2182f811454bf6c..4129c6b4b6d2c31719ca54382a73a910b0cc9a0c 100644
--- a/src/share/vm/runtime/arguments_ext.hpp
+++ b/src/share/vm/runtime/arguments_ext.hpp
@@ -29,6 +29,8 @@
#include "runtime/arguments.hpp"
class ArgumentsExt: AllStatic {
+private:
+ static void set_tenant_flags();
public:
static inline void set_gc_specific_flags();
static void process_options(const JavaVMInitArgs* args) {}
@@ -36,6 +38,8 @@ public:
void ArgumentsExt::set_gc_specific_flags() {
Arguments::set_gc_specific_flags();
+
+ set_tenant_flags();
}
#endif // SHARE_VM_RUNTIME_ARGUMENTS_EXT_HPP
diff --git a/src/share/vm/runtime/globals_ext.hpp b/src/share/vm/runtime/globals_ext.hpp
index 6c654f2ade39c1d110f8442c9255eaf7a214e3c9..402f6500a42ca12bf0bf47dd2775f370c0af6835 100644
--- a/src/share/vm/runtime/globals_ext.hpp
+++ b/src/share/vm/runtime/globals_ext.hpp
@@ -95,7 +95,13 @@
"commit/uncommit. 0 be same as ConcGCThreads") \
\
product(bool, MultiTenant, false, \
- "Enable the multi-tenant feature.") \
+ "Enable the multi-tenant feature.") \
+ \
+ product(bool, TenantHeapIsolation, false, \
+ "Isolates heap memory used by different TenantContainers") \
+ \
+ product(bool, UsePerTenantTLAB, false, \
+ "Mutator may maintain multiple TLABs for each of the tenants") \
//add new AJVM specific flags here
diff --git a/src/share/vm/runtime/thread.cpp b/src/share/vm/runtime/thread.cpp
index 88b95acf61d7e2de1f56e8250716ec683ed3be98..5a3389475ce0405db9eed51d4e5c98bede690aa1 100644
--- a/src/share/vm/runtime/thread.cpp
+++ b/src/share/vm/runtime/thread.cpp
@@ -307,6 +307,10 @@ Thread::Thread() {
"bug in forced alignment of thread objects");
}
#endif /* ASSERT */
+
+ if (UseG1GC) {
+ _alloc_context = AllocationContext::system();
+ }
}
void Thread::initialize_thread_local_storage() {
@@ -1921,7 +1925,11 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
remove_stack_guard_pages();
if (UseTLAB) {
- tlab().make_parsable(true); // retire TLAB
+ if (UsePerTenantTLAB) {
+ make_all_tlabs_parsable(true, true);
+ } else {
+ tlab().make_parsable(true); // retire TLAB
+ }
}
if (JvmtiEnv::environments_might_exist()) {
@@ -1947,6 +1955,148 @@ void JavaThread::exit(bool destroy_vm, ExitType exit_type) {
Threads::remove(this);
}
+// NOTE: active TLABs are only retired & deleted at a safepoint or at thread exit.
+// It is OK to destroy a G1TenantAllocationContext while its previously
+// used TLABs are still linked into mutator threads, because no further alloc requests
+// can happen on such a stale TLAB, and its remaining free space cannot be
+// used by any other threads or tenants either.
+
+#if INCLUDE_ALL_GCS
+void Thread::make_all_tlabs_parsable(bool retire, bool delete_saved) {
+ assert(UseG1GC && TenantHeapIsolation
+ && UseTLAB && UsePerTenantTLAB, "pre-condition");
+
+ for (ThreadLocalAllocBuffer* tlab = &(this->tlab()); tlab != NULL;
+ tlab = tlab->next()) {
+ tlab->make_parsable(retire);
+ }
+
+ if (delete_saved) {
+ assert(retire, "should only delete after retire!");
+ ThreadLocalAllocBuffer* tlab = this->tlab().next();
+ while (tlab != NULL) {
+ ThreadLocalAllocBuffer* next = tlab->next();
+ delete tlab;
+ tlab = next;
+ }
+
+ this->tlab().set_next(NULL);
+ }
+}
+
+void Thread::clean_tlab_for(const G1TenantAllocationContext* context) {
+ assert(UseG1GC && TenantHeapIsolation
+ && UseTLAB && UsePerTenantTLAB, "sanity");
+ assert(SafepointSynchronize::is_at_safepoint()
+ && Thread::current()->is_VM_thread(), "pre-condition");
+ guarantee(context != G1TenantAllocationContexts::system_context(),
+ "never clean root tenant context");
+
+ if (this->is_Java_thread()) {
+ JavaThread* java_thread = (JavaThread*)this;
+ // make sure TLAB's tenant allocation context is same as Java thread's
+ guarantee(java_thread->tenant_allocation_context() == this->tlab().tenant_allocation_context(),
+ err_msg("Inconsistent tenant allocation context thread=" PTR_FORMAT ",context=" PTR_FORMAT
+ ", but its TLAB's context=" PTR_FORMAT,
+ java_thread,
+ java_thread->tenant_allocation_context(),
+ this->tlab().tenant_allocation_context()));
+ }
+
+ // if the to-be-deleted context is current active context,
+ // we just completely switch to ROOT tenant's TLAB
+ const G1TenantAllocationContext* context_to_search =
+ this->tlab().tenant_allocation_context() == context ? G1TenantAllocationContexts::system_context() : context;
+
+ for (ThreadLocalAllocBuffer* tlab = &(this->tlab()), *prev = NULL;
+ tlab != NULL;
+ prev = tlab, tlab = tlab->next())
+ {
+ if (tlab->tenant_allocation_context() == context_to_search) {
+ guarantee(prev != NULL, "Cannot be an in-use TLAB");
+ if (context_to_search == G1TenantAllocationContexts::system_context()) {
+ guarantee(this->tlab().tenant_allocation_context() == context,
+ "must be in-use TLAB");
+ guarantee(tlab != &(this->tlab()),
+ "Cannot be root context");
+ this->tlab().make_parsable(true);
+ if (this->is_Java_thread()) {
+ // set_tenantObj will do search and swap, without changing the list structure
+ ((JavaThread*)this)->set_tenantObj(NULL);
+ } else {
+ this->tlab().swap_content(tlab);
+ }
+ } else {
+ guarantee(this->tlab().tenant_allocation_context() != context,
+ "cannot be in-use TLAB");
+ tlab->make_parsable(true);
+ }
+ // remove the 'dead' TLAB from list
+ ThreadLocalAllocBuffer* next_tlab = tlab->next();
+ prev->set_next(next_tlab);
+ delete tlab;
+ return;
+ }
+ }
+}
+
+const AllocationContext_t& Thread::allocation_context() const {
+ assert(UseG1GC, "Only G1 policy supported");
+ return _alloc_context;
+}
+void Thread::set_allocation_context(AllocationContext_t context) {
+ assert(UseG1GC, "Only G1 policy supported");
+ assert(Thread::current() == this
+ || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()
+ && VMThread::vm_operation() != NULL
+ && VMThread::vm_operation()->type() == VM_Operation::VMOp_DestroyG1TenantAllocationContext),
+ "Only allowed to be set by self thread or tenant destruction vm_op");
+
+ _alloc_context = context;
+
+ if (UseG1GC && TenantHeapIsolation
+ && UseTLAB
+ && this->is_Java_thread()) { // only for Java thread, though _tlab is in Thread
+
+ if (UsePerTenantTLAB) {
+ G1TenantAllocationContext* tac = context.tenant_allocation_context();
+ ThreadLocalAllocBuffer* tlab = &(this->tlab());
+ assert(tlab != NULL, "Attach to same tenant twice!");
+
+ if (tlab->tenant_allocation_context() == tac) {
+ // no need to switch TLAB
+ assert(tac == G1TenantAllocationContexts::system_context(),
+ "Must be ROOT allocation context");
+ return;
+ }
+
+ // traverse the saved TLAB list to find a previously used TLAB for 'tac'
+ do {
+ tlab = tlab->next();
+ if (tlab != NULL && tlab->tenant_allocation_context() == tac) {
+ this->tlab().swap_content(tlab);
+ break;
+ }
+ } while (tlab != NULL);
+
+ // no saved TLAB found: this is the first time the current thread allocates in 'tac'
+ if (tlab == NULL) {
+ ThreadLocalAllocBuffer* new_tlab = new ThreadLocalAllocBuffer();
+ new_tlab->initialize();
+ // link to list
+ new_tlab->set_next(this->tlab().next());
+ new_tlab->set_tenant_allocation_context(tac);
+ this->tlab().set_next(new_tlab);
+ // make the new TLAB active
+ this->tlab().swap_content(new_tlab);
+ }
+ } else {
+ tlab().make_parsable(true /* retire */);
+ }
+ }
+}
+#endif // INCLUDE_ALL_GCS
+
#if INCLUDE_ALL_GCS
// Flush G1-related queues.
void JavaThread::flush_barrier_queues() {
@@ -1954,6 +2104,64 @@ void JavaThread::flush_barrier_queues() {
dirty_card_queue().flush();
}
+G1TenantAllocationContext* JavaThread::tenant_allocation_context() {
+ assert(TenantHeapIsolation, "pre-condition");
+
+ oop tenant_obj = tenantObj();
+ return (tenant_obj == NULL ? NULL : com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(tenant_obj));
+}
+
+void JavaThread::set_tenant_allocation_context(G1TenantAllocationContext* context) {
+ assert(TenantHeapIsolation, "pre-condition");
+ set_tenantObj(context == NULL ? (oop)NULL : context->tenant_container());
+}
+
+void JavaThread::set_tenantObj(oop tenantObj) {
+ assert(MultiTenant
+ // prevent assigning the same non-ROOT tenant twice;
+ // but allow re-assigning the ROOT tenant, to support
+ // TenantContainer.destroy() while live threads are still attached
+ && (_tenantObj != tenantObj || tenantObj == NULL),
+ "pre-condition");
+
+ if (_tenantObj == tenantObj) {
+ return;
+ }
+
+ oop prev_tenant = _tenantObj;
+ _tenantObj = tenantObj;
+
+#if INCLUDE_ALL_GCS
+ if (UseG1GC) {
+ set_allocation_context(AllocationContext_t(tenantObj == NULL ?
+ NULL : com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(tenantObj)));
+
+#ifndef PRODUCT
+ if (TenantHeapIsolation && UseTLAB && UsePerTenantTLAB) {
+ G1TenantAllocationContext* prev_context = prev_tenant == NULL ? NULL /* root tenant */
+ : com_alibaba_tenant_TenantContainer::get_tenant_allocation_context(prev_tenant);
+ //
+ // thread was attached to tenant container whose allocation context is ROOT tenant's,
+ // which means the tenant container is DEAD.
+ //
+ // in current implementation, inconsistency between TenantContainer object
+ // and its G1TenantAllocationContext pointer is allowed.
+ // when a TenantContainer is destroyed before all attached threads get detached,
+ // JVM will just switch all the allocation contexts of attached threads to ROOT tenant,
+ // including the pointer recorded in TenantContainer object.
+ //
+ if (prev_tenant != NULL && prev_context == G1TenantAllocationContexts::system_context()) {
+ assert(com_alibaba_tenant_TenantContainer::is_dead(prev_tenant),
+ "Must be dead TenantContainer");
+ }
+ }
+#endif
+ }
+#endif // #if INCLUDE_ALL_GCS
+}
+
void JavaThread::initialize_queues() {
assert(!SafepointSynchronize::is_at_safepoint(),
"we should not be at a safepoint");
@@ -2000,7 +2208,11 @@ void JavaThread::cleanup_failed_attach_current_thread() {
remove_stack_guard_pages();
if (UseTLAB) {
- tlab().make_parsable(true); // retire TLAB, if any
+ if (UsePerTenantTLAB) {
+ this->make_all_tlabs_parsable(true, false);
+ } else {
+ tlab().make_parsable(true); // retire TLAB, if any
+ }
}
#if INCLUDE_ALL_GCS
@@ -4733,7 +4945,6 @@ void Thread::muxRelease (volatile intptr_t * Lock) {
}
}
-
void Threads::verify() {
ALL_JAVA_THREADS(p) {
p->verify();
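
A condensed view of the per-tenant TLAB switch implemented in set_allocation_context() above (pseudocode-level sketch, not patch code):

    // on attaching to tenant allocation context 'tac':
    //   active TLAB already belongs to 'tac'      -> nothing to do
    //   a saved TLAB for 'tac' exists in the list -> swap_content() with it
    //   otherwise                                 -> allocate + initialize a new TLAB,
    //                                                link it in, swap_content() to activate
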
diff --git a/src/share/vm/runtime/thread.hpp b/src/share/vm/runtime/thread.hpp
index 9b2341cf6fbd8f416b28533ca9a3dbf549b2680f..37ffa8185be65fb5c6f994dfec387fd1acf03e07 100644
--- a/src/share/vm/runtime/thread.hpp
+++ b/src/share/vm/runtime/thread.hpp
@@ -81,6 +81,10 @@ class GCTaskQueue;
class ThreadClosure;
class IdealGraphPrinter;
+#if INCLUDE_ALL_GCS
+class G1TenantAllocationContext;
+#endif
+
class Metadata;
template <class T, MEMFLAGS F> class ChunkedList;
typedef ChunkedList<Metadata*, mtInternal> MetadataOnStackBuffer;
@@ -457,6 +461,11 @@ class Thread: public ThreadShadow {
tlab().initialize();
}
}
+#if INCLUDE_ALL_GCS
+ void make_all_tlabs_parsable(bool retire, bool delete_saved);
+ // called during tenantContainer destruction
+ void clean_tlab_for(const G1TenantAllocationContext* context);
+#endif // if INCLUDE_ALL_GCS
jlong allocated_bytes() { return _allocated_bytes; }
void set_allocated_bytes(jlong value) { _allocated_bytes = value; }
@@ -677,6 +686,13 @@ protected:
static void muxAcquire (volatile intptr_t * Lock, const char * Name) ;
static void muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) ;
static void muxRelease (volatile intptr_t * Lock) ;
+
+private:
+ AllocationContext_t _alloc_context; // context for Java allocation requests
+ // put it here because allocation may happen in VM thread
+public:
+ const AllocationContext_t& allocation_context() const;
+ void set_allocation_context(AllocationContext_t context);
};
// Inline implementation of Thread::current()
@@ -1051,7 +1067,13 @@ class JavaThread: public Thread {
// Get/set the tenant which the thread is attached to
oop tenantObj() const { return _tenantObj; }
- void set_tenantObj(oop tenantObj) { _tenantObj = tenantObj; }
+ void set_tenantObj(oop tenantObj);
+
+#if INCLUDE_ALL_GCS
+ G1TenantAllocationContext* tenant_allocation_context();
+
+ void set_tenant_allocation_context(G1TenantAllocationContext* context);
+#endif
ThreadPriority java_priority() const; // Read from threadObj()
diff --git a/src/share/vm/runtime/thread.inline.hpp b/src/share/vm/runtime/thread.inline.hpp
index b68e7d1ae38d758755fec8837f5542f5160d4885..9516669dee5e2d0ca5779139148656371c18c5f5 100644
--- a/src/share/vm/runtime/thread.inline.hpp
+++ b/src/share/vm/runtime/thread.inline.hpp
@@ -49,6 +49,17 @@
inline jlong Thread::cooked_allocated_bytes() {
jlong allocated_bytes = OrderAccess::load_acquire(&_allocated_bytes);
if (UseTLAB) {
+ if (MultiTenant && UsePerTenantTLAB) {
+ // accumulate used_bytes from all TLABs
+ size_t used_bytes = 0;
+ for (ThreadLocalAllocBuffer* tlab = &_tlab;
+ tlab != NULL;
+ tlab = tlab->next()) {
+ used_bytes += tlab->used_bytes();
+ }
+ return allocated_bytes + used_bytes;
+ }
+
size_t used_bytes = tlab().used_bytes();
if ((ssize_t)used_bytes > 0) {
// More-or-less valid tlab. The load_acquire above should ensure
diff --git a/src/share/vm/runtime/thread_ext.hpp b/src/share/vm/runtime/thread_ext.hpp
index 6f3a5a36fc291ccd6fd0b4e76f80bdd0fe6fa12d..d767469e78e86929b0c584f57baa46728f3099cd 100644
--- a/src/share/vm/runtime/thread_ext.hpp
+++ b/src/share/vm/runtime/thread_ext.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_RUNTIME_THREAD_EXT_HPP
#include "memory/allocation.hpp"
+#include "gc_implementation/g1/g1AllocationContext.hpp"
class ThreadExt VALUE_OBJ_CLASS_SPEC {
public:
diff --git a/src/share/vm/runtime/vm_operations.hpp b/src/share/vm/runtime/vm_operations.hpp
index cc32b97ca24abbc309935f710eeb6d46e6de9f13..6ad9615dbdd35f105674153cc26a2beb52201443 100644
--- a/src/share/vm/runtime/vm_operations.hpp
+++ b/src/share/vm/runtime/vm_operations.hpp
@@ -98,6 +98,7 @@
template(RotateGCLog) \
template(WhiteBoxOperation) \
template(ClassLoaderStatsOperation) \
+ template(DestroyG1TenantAllocationContext) \
class VM_Operation: public CHeapObj<mtInternal> {
public:
diff --git a/src/share/vm/utilities/hashtable.cpp b/src/share/vm/utilities/hashtable.cpp
index 71fa221b6e639a5fa9b1b3c38af721af8283f65f..666df5513f6fa7512ff8fc113fe2407bcc941bb6 100644
--- a/src/share/vm/utilities/hashtable.cpp
+++ b/src/share/vm/utilities/hashtable.cpp
@@ -58,6 +58,9 @@ template <MEMFLAGS F> BasicHashtableEntry<F>* BasicHashtable<F>::new_entry(unsig
len = 1 << log2_int(len); // round down to power of 2
assert(len >= _entry_size, "");
_first_free_entry = NEW_C_HEAP_ARRAY2(char, len, F, CURRENT_PC);
+ if (NULL != _memory_blocks) {
+ _memory_blocks->append(_first_free_entry);
+ }
_end_block = _first_free_entry + len;
}
entry = (BasicHashtableEntry*)_first_free_entry;
@@ -337,6 +340,9 @@ template <MEMFLAGS F> void BasicHashtable<F>::copy_buckets(char** top, char* end
*top += len;
}
+unsigned int HashMapUtil::hash(oop o) {
+ return (unsigned int)ObjectSynchronizer::FastHashCode(JavaThread::current(), o);
+}
#ifndef PRODUCT
@@ -405,6 +411,7 @@ template class HashtableEntry;
template class HashtableEntry;
template class BasicHashtableEntry;
template class BasicHashtableEntry;
+template class BasicHashtableEntry<mtTenant>;
template class BasicHashtable;
template class BasicHashtable;
template class BasicHashtable;
@@ -413,3 +420,174 @@ template class Hashtable;
template class Hashtable;
template class BasicHashtable;
template class Hashtable;
+template class BasicHashtable<mtTenant>;
+
+#ifndef PRODUCT
+// Testcase for HashMap
+
+// customized key type for testing
+class TestKey VALUE_OBJ_CLASS_SPEC {
+private:
+ int i;
+public:
+ TestKey(int i_) : i(i_) { }
+ bool operator == (const TestKey& tc) { return i == tc.i; }
+ bool operator != (const TestKey& tc) { return i != tc.i; }
+
+ unsigned int hash_code() { return *(unsigned int*)this; }
+};
+
+class HashMapTest : public AllStatic {
+public:
+ static void test_basic();
+
+ static void test_customized_keytype();
+
+ static void test_for_each();
+
+ static void test_map_Iterator();
+
+ static void test_clear();
+};
+
+void HashMapTest::test_basic() {
+ // integer as hash key
+ HashMap<int, int> hm(8);
+ hm.put(10, 10);
+ hm.put(20, 2);
+
+ assert(hm.contains(10), "");
+ assert(hm.contains(20), "");
+ assert(!hm.contains(30), "");
+ assert(!hm.contains(0), "");
+ assert(10 == hm.get(10)->value(), "");
+ assert(2 == hm.get(20)->value(), "");
+
+ // should overwrite
+ hm.put(10, 11);
+ assert(11 == hm.get(10)->value(), "");
+ hm.put(20, 3);
+ assert(3 == hm.get(20)->value(), "");
+
+ // remove test
+ hm.put(18, 3);
+ assert(3 == hm.remove(18)->value(), "");
+ assert(!hm.contains(18), "");
+ assert(NULL == hm.remove(18), "");
+
+ assert(11 == hm.remove(10)->value(), "");
+ assert(!hm.contains(10), "");
+ assert(NULL == hm.remove(10), "");
+
+ // pointer as hash key
+ HashMap<void*, int> map2(8);
+ void* p = &hm;
+ map2.put(p, 10);
+ assert(map2.contains(p), "");
+ assert(!map2.contains(NULL), "");
+ assert(10 == map2.get(p)->value(), "");
+
+ // test overwrite
+ map2.put(p, 20);
+ assert(20 == map2.get(p)->value(), "");
+}
+
+void HashMapTest::test_customized_keytype() {
+ HashMap<TestKey, int> map(8);
+ TestKey k1(1), k2(2);
+
+ assert(0 == map.number_of_entries(), "");
+ map.put(k1, 2);
+ assert(map.contains(k1), "");
+ assert(2 == map.get(k1)->value(), "");
+ map.put(k1, 3);
+ assert(3 == map.get(k1)->value(), "");
+ assert(1 == map.number_of_entries(), "");
+
+ map.put(k1, 1);
+ map.put(k2, 2);
+ assert(2 == map.number_of_entries(), "");
+ assert(2 == map.get(k2)->value(), "");
+ assert(1 == map.get(k1)->value(), "");
+}
+
+void HashMapTest::test_for_each() {
+ HashMap<int, int> hm(32);
+
+ for (int i = 0; i < 32; ++i) {
+ hm.put(i, i + 1);
+ assert((i + 1) == hm.number_of_entries(), "");
+ }
+
+ for (HashMap<int, int>::Iterator itr = hm.begin();
+ itr != hm.end(); ++itr) {
+ assert(hm.contains(itr->key()), "");
+ // bad to modify during iteration, but this is just a test
+ hm.put(itr->key(), 1 + itr->value());
+ }
+
+ assert(32 == hm.number_of_entries(), "");
+
+ for (int i = 0; i < 32; ++i) {
+ assert(hm.contains(i), "");
+ assert((i + 2) == hm.get(i)->value(), "");
+ }
+}
+
+void HashMapTest::test_map_Iterator() {
+ HashMap<AllocationContext_t, int> map(8);
+ AllocationContext_t ac((G1TenantAllocationContext*)&map);
+
+ assert(map.number_of_entries() == 0, "");
+ HashMap<AllocationContext_t, int>::Iterator bg = map.begin(), ed = map.end();
+ // test different operators
+ assert(bg == ed, "");
+ assert(!(bg != ed), "");
+ assert(bg._idx == ed._idx, "");
+ assert(bg._cur == ed._cur, "");
+ assert(bg._map == ed._map, "");
+
+ map.put(ac, 1);
+ assert(map.contains(ac), "");
+ assert(map.get(ac)->value() == 1, "");
+ assert(map.number_of_entries() == 1, "");
+
+ bg = map.begin();
+
+ assert(bg != ed, "");
+ assert(!(bg == ed), "");
+ assert(bg._idx != ed._idx, "");
+ assert(bg._cur != ed._cur, "");
+ assert(bg._map == ed._map, "");
+
+ HashMap<AllocationContext_t, int>::Entry& entry = *bg;
+ assert(ac == entry._key, "");
+ assert(1 == entry._value, "");
+ assert(ac == bg->_key, "");
+ assert(1 == bg->_value, "");
+}
+
+void HashMapTest::test_clear() {
+ HashMap<int, int> map(16);
+ for (int i = 0; i < 32; ++i) {
+ map.put(i, i + 1);
+ }
+ assert(map.number_of_entries() == 32, "");
+ map.clear();
+ assert(map.number_of_entries() == 0, "");
+ for (int i = 0; i < 32; ++i) {
+ map.put(i, i + 1);
+ }
+ assert(map.number_of_entries() == 32, "");
+}
+
+// Test case
+void Test_HashMap() {
+ HashMapTest::test_basic();
+ HashMapTest::test_for_each();
+ HashMapTest::test_customized_keytype();
+ HashMapTest::test_map_Iterator();
+ HashMapTest::test_clear();
+}
+#endif // PRODUCT
diff --git a/src/share/vm/utilities/hashtable.hpp b/src/share/vm/utilities/hashtable.hpp
index d08deffcb1159cce8c8f0fba4cba226c7534bb60..d03c95c7786b8be88ed211dd8140507302bc64c8 100644
--- a/src/share/vm/utilities/hashtable.hpp
+++ b/src/share/vm/utilities/hashtable.hpp
@@ -177,6 +177,8 @@ protected:
int _lookup_length;
void verify_lookup_length(double load);
#endif
+ // to record allocated memory chunks, only used by DeallocatableHashtable
+ GrowableArray<char*>* _memory_blocks;
void initialize(int table_size, int entry_size, int number_of_entries);
@@ -246,6 +248,43 @@ public:
void verify() PRODUCT_RETURN;
};
+//
+// A derived BasicHashtable with dynamic memory deallocation support
+//
+// BasicHashtable holds memory allocated via NEW_C_HEAP_ARRAY2 permanently,
+// without ever releasing it, because it is intended for global data
+// structures like SymbolTable. The implementation below deallocates its
+// memory chunks in the destructor, and thus may be used as a transient
+// data structure.
+//
+template <MEMFLAGS F> class DeallocatableHashtable : public BasicHashtable<F> {
+public:
+ DeallocatableHashtable(int table_size, int entry_size)
+ : BasicHashtable<F>(table_size, entry_size)
+ {
+ // chunks recorded here are released in the destructor below
+ GrowableArray<char*>*& mem_blocks = BasicHashtable<F>::_memory_blocks;
+ mem_blocks = new (ResourceObj::C_HEAP, F) GrowableArray<char*>(0x4 /* initial size */,
+ true /* on C heap */, F);
+ assert(NULL != mem_blocks, "pre-condition");
+ }
+
+ ~DeallocatableHashtable() {
+ BasicHashtable<F>::free_buckets();
+ GrowableArray<char*>*& mem_blocks = BasicHashtable<F>::_memory_blocks;
+ assert(NULL != mem_blocks, "pre-condition");
+
+ for (GrowableArrayIterator<char*> itr = mem_blocks->begin();
+ itr != mem_blocks->end(); ++itr) {
+ FREE_C_HEAP_ARRAY(char, *itr, F);
+ }
+ mem_blocks->clear();
+
+ delete mem_blocks;
+ mem_blocks = NULL;
+ }
+};
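+
+// A minimal usage sketch (illustrative only, not part of this patch);
+// assumes entries derived from BasicHashtableEntry<mtInternal>:
+//
+//   {
+//     DeallocatableHashtable<mtInternal> table(16, sizeof(BasicHashtableEntry<mtInternal>));
+//     // ... use like any BasicHashtable ...
+//   } // destructor frees the buckets and all recorded memory blocks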
+
template <class T, MEMFLAGS F> class Hashtable : public BasicHashtable<F> {
friend class VMStructs;
@@ -361,4 +400,289 @@ public:
}
};
+//======================================================================
+// A hash map implementation based on hashtable
+//======================================================================
+
+// forward declarations
+template <typename K, typename V, MEMFLAGS F> class HashMap;
+template <typename K, typename V, MEMFLAGS F> class HashMapIterator;
+
+// utility class to extract hash codes from various types
+class HashMapUtil : public AllStatic {
+public:
+ // for integers, use its own value as hash
+ static unsigned int hash(long l) { return *(unsigned int*)&l; }
+ static unsigned int hash(int i) { return (unsigned int)i; }
+ static unsigned int hash(short s) { return (unsigned int)s; }
+ static unsigned int hash(char c) { return (unsigned int)c; }
+ static unsigned int hash(unsigned long sz) { return (unsigned int)(sz & 0xFFFFFFFF); }
+
+ // use the middle bits of address value
+ static unsigned int hash(void *p) {
+#ifdef _LP64
+ uint64_t val = *(uint64_t*)&p;
+ return (unsigned int)((val & 0xFFFFFFFF) >> 3);
+#else
+ uint64_t val = *(uint32_t*)&p;
+ return (unsigned int)((val & 0xFFFF) >> 2);
+#endif // _LP64
+ }
+
+ static unsigned int hash(oop o);
+
+ static unsigned int hash(Handle h) { return hash(h()); }
+
+ // the general contract for obtaining hash codes of all non pre-defined types:
+ // the type must define a non-static member method `unsigned int hash_code()`
+ // that returns a stable unsigned int value as the hash code
+ // (see the sketch after this class)
+ template <typename T>
+ static unsigned int hash(T t) { return t.hash_code(); }
+};
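+
+// Sketch of a user-defined key type satisfying the hash_code() contract
+// above (illustrative only; TenantKey is a hypothetical type). HashMap
+// additionally compares keys with ==, hence the operator overload:
+//
+//   class TenantKey {
+//     jlong _id;
+//   public:
+//     bool operator == (const TenantKey& other) const { return _id == other._id; }
+//     unsigned int hash_code() { return HashMapUtil::hash((long)_id); }
+//   };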
+
+// entry type
+template <typename K, typename V, MEMFLAGS F>
+class HashMapEntry : public BasicHashtableEntry<F> {
+ friend class HashMap<K, V, F>;
+#ifndef PRODUCT
+ friend class HashMapTest;
+#endif // PRODUCT
+
+private:
+ K _key;
+ V _value;
+
+public:
+ HashMapEntry<K, V, F>* next() {
+ return (HashMapEntry<K, V, F>*)BasicHashtableEntry<F>::next();
+ }
+
+ void set_next(HashMapEntry<K, V, F>* next) {
+ BasicHashtableEntry<F>::set_next((BasicHashtableEntry<F>*)next);
+ }
+
+ K key() { return _key; }
+ V value() { return _value; }
+ K* key_addr() { return &_key; }
+ V* value_addr() { return &_value; }
+};
+
+//
+// hash map class implemented in C++ on top of BasicHashtable
+// - unordered
+// - unique keys
+// - not MT-safe
+// - deallocatable
+// (see the usage sketch after this class)
+//
+template <typename K, typename V, MEMFLAGS F>
+class HashMap : public DeallocatableHashtable<F> {
+ friend class HashMapIterator<K, V, F>;
+public:
+ typedef HashMapEntry<K, V, F> Entry;
+ typedef HashMapIterator<K, V, F> Iterator;
+ // alternative hashing function type
+ typedef unsigned int (*AltHashFunc) (K);
+
+private:
+ // if table_size == 1, will cause new_entry() to fail
+ // have to allocate table with larger size, here using 0x4
+ static const int MIN_TABLE_SIZE = 0x4;
+
+ // alternative hashing function, NULL unless set via set_alt_hasher()
+ AltHashFunc* _alt_hasher;
+
+protected:
+ unsigned int compute_hash(K k) {
+ if (_alt_hasher != NULL) {
+ // call through the stored function pointer
+ return (*_alt_hasher)(k);
+ }
+ return HashMapUtil::hash(k);
+ }
+
+ Entry* bucket(int index) {
+ return (Entry*)BasicHashtable<F>::bucket(index);
+ }
+
+ Entry* get_entry(int index, unsigned int hash, K k) {
+ for (Entry* pp = bucket(index); pp != NULL; pp = pp->next()) {
+ if (pp->hash() == hash && pp->_key == k) {
+ return pp;
+ }
+ }
+ return NULL;
+ }
+
+ Entry* get_entry(K k) {
+ unsigned int hash = compute_hash(k);
+ return get_entry(BasicHashtable<F>::hash_to_index(hash), hash, k);
+ }
+
+ Entry* new_entry(K k, V v) {
+ unsigned int hash = compute_hash(k);
+ Entry* pp = (Entry*)BasicHashtable<F>::new_entry(hash);
+ pp->_key = k;
+ pp->_value = v;
+ return pp;
+ }
+
+ void add_entry(Entry* pp) {
+ int index = BasicHashtable<F>::hash_to_index(pp->hash());
+ BasicHashtable<F>::add_entry(index, pp);
+ }
+
+public:
+ HashMap(int table_size)
+ : DeallocatableHashtable<F>((table_size < MIN_TABLE_SIZE ? MIN_TABLE_SIZE : table_size),
+ sizeof(Entry)),
+ _alt_hasher(NULL)
+ { }
+
+ // Associates the specified value with the specified key in this map.
+ // If the map previously contained a mapping for the key, the old value is replaced.
+ void put(K k, V v) {
+ Entry* e = get_entry(k);
+ if (NULL != e) {
+ e->_value = v;
+ } else {
+ e = new_entry(k, v);
+ assert(NULL != e, "cannot create new entry");
+ add_entry(e);
+ }
+ }
+
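+ // Removes the mapping for the specified key, if present.
+ // Note: the returned entry has already been recycled onto the
+ // hashtable's free list by free_entry(), so callers must not retain it.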
+ Entry* remove(K k) {
+ int index = this->hash_to_index(compute_hash(k));
+ Entry *e = bucket(index);
+ Entry *prev = NULL;
+ for (; e != NULL ; prev = e, e = e->next()) {
+ if (e->_key == k) {
+ if (prev != NULL) {
+ prev->set_next(e->next());
+ } else {
+ this->set_entry(index, e->next());
+ }
+ this->free_entry(e);
+ return e;
+ }
+ }
+ return NULL;
+ }
+
+ // Returns true if this map contains a mapping for the specified key
+ bool contains(K k) {
+ return NULL != get_entry(k);
+ }
+
+ // Returns the entry to which the specified key is mapped,
+ // or null if this map contains no mapping for the key.
+ Entry* get(K k) {
+ return get_entry(k);
+ }
+
+ // Removes all of the mappings from this map. The map will be empty after this call returns.
+ void clear() {
+ // return all entries to the free list
+ for (int idx = 0; idx < BasicHashtable<F>::table_size(); ++idx) {
+ for (Entry* entry = bucket(idx); NULL != entry;) {
+ Entry* next = entry->next();
+ this->free_entry(entry);
+ entry = next;
+ }
+ BasicHashtable<F>::set_entry(idx, NULL);
+ }
+ }
+
+ Iterator begin() {
+ return Iterator(this);
+ }
+
+ Iterator end() {
+ Iterator itr(this);
+ itr._cur = NULL;
+ itr._idx = BasicHashtable<F>::table_size();
+ return itr;
+ }
+
+ // set an alternative hashing function
+ void set_alt_hasher(AltHashFunc* hash_func) {
+ _alt_hasher = hash_func;
+ }
+};
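+
+// A minimal usage sketch (illustrative only, not part of this patch):
+//
+//   HashMap<int, int, mtInternal> map(16);
+//   map.put(1, 100);               // insert
+//   map.put(1, 200);               // overwrite the existing mapping
+//   if (map.contains(1)) {
+//     int v = map.get(1)->value(); // v == 200
+//   }
+//   map.remove(1);                 // entry recycled to the free list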
+
+// External iteration support
+template <typename K, typename V, MEMFLAGS F>
+class HashMapIterator VALUE_OBJ_CLASS_SPEC {
+ friend class HashMap<K, V, F>;
+#ifndef PRODUCT
+ friend class HashMapTest;
+#endif // PRODUCT
+
+private:
+ typedef typename HashMap<K, V, F>::Entry Entry;
+ Entry* _cur;
+ HashMap<K, V, F>* _map;
+ int _idx;
+
+public:
+ HashMapIterator(HashMap<K, V, F>* map)
+ : _map(map), _cur(NULL), _idx(0) {
+ for (;_idx < _map->table_size(); ++_idx) {
+ _cur = _map->bucket(_idx);
+ if (NULL != _cur) {
+ break;
+ }
+ }
+ }
+
+ HashMapIterator(const HashMapIterator& other)
+ : _map(other._map), _cur(other._cur), _idx(other._idx)
+ { }
+
+ HashMapIterator& operator++() {
+ if (NULL != _cur) {
+ if (NULL != _cur->next()) {
+ _cur = _cur->next();
+ } else {
+ do {
+ ++_idx;
+ } while (_idx < _map->table_size()
+ && NULL == _map->bucket(_idx));
+
+ assert(_idx <= _map->table_size(), "post-condition");
+
+ if (_idx == _map->table_size()) {
+ // end of iteration
+ _cur = NULL;
+ } else {
+ // move to next bucket
+ _cur = _map->bucket(_idx);
+ }
+ }
+ }
+
+ return *this;
+ }
+
+ HashMapIterator& operator = (const HashMapIterator& other) {
+ if (&other != this) {
+ _map = other._map;
+ _cur = other._cur;
+ _idx = other._idx;
+ }
+ return *this;
+ }
+
+ Entry& operator*() { return *_cur; }
+
+ Entry* operator->() { return _cur; }
+
+ bool operator == (const HashMapIterator& other) const {
+ return (_map == other._map && _cur == other._cur && _idx == other._idx);
+ }
+
+ bool operator != (const HashMapIterator& other) const {
+ return (_map != other._map || _cur != other._cur || _idx != other._idx);
+ }
+};
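+
+// Iteration sketch, mirroring HashMapTest::test_for_each (illustrative only):
+//
+//   for (HashMap<int, int, mtInternal>::Iterator itr = map.begin();
+//        itr != map.end(); ++itr) {
+//     tty->print_cr("%d -> %d", itr->key(), itr->value());
+//   }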
+
#endif // SHARE_VM_UTILITIES_HASHTABLE_HPP
diff --git a/src/share/vm/utilities/hashtable.inline.hpp b/src/share/vm/utilities/hashtable.inline.hpp
index 9356c985ee5ce88f4f9cbb297cbcc8d2b80eca12..78c6fc54cb8798764ebd8fdf8d43339942ec5190 100644
--- a/src/share/vm/utilities/hashtable.inline.hpp
+++ b/src/share/vm/utilities/hashtable.inline.hpp
@@ -64,6 +64,7 @@ template inline void BasicHashtable::initialize(int table_size,
_first_free_entry = NULL;
_end_block = NULL;
_number_of_entries = number_of_entries;
+ _memory_blocks = NULL;
#ifdef ASSERT
_lookup_count = 0;
_lookup_length = 0;
diff --git a/test/multi-tenant/TestContainerOf.java b/test/multi-tenant/TestContainerOf.java
new file mode 100644
index 0000000000000000000000000000000000000000..15d12a876e251b32d5572638b90797a489d89d4f
--- /dev/null
+++ b/test/multi-tenant/TestContainerOf.java
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test TenantContainer.containerOf() to retrieve tenant container of a Java object
+ * @library /testlibrary
+ * @build TestContainerOf
+ * @run main/othervm -XX:+MultiTenant -XX:+TenantHeapIsolation -XX:+UseG1GC -Xmx1024M -Xms512M
+ * -XX:G1HeapRegionSize=1M TestContainerOf
+ */
+
+import static com.oracle.java.testlibrary.Asserts.*;
+import com.alibaba.tenant.TenantConfiguration;
+import com.alibaba.tenant.TenantContainer;
+import com.alibaba.tenant.TenantException;
+import java.util.Arrays;
+
+public class TestContainerOf {
+
+ public static void main(String[] args) throws Exception {
+ new TestContainerOf().runAllTests();
+ }
+
+ void runAllTests() throws Exception {
+ testTenantContainerOf(1);
+ testTenantContainerOf(10);
+ testTenantContainerOf(80);
+ testRunInRootTenant();
+ }
+
+ // test TenantContainer.containerOf()
+ private void testTenantContainerOf(int count) throws Exception {
+ System.out.println(">> Begin TEST testTenantContainerOf: count=" + count);
+
+ Object[] objects = new Object[count];
+ TenantContainer[] tenants = new TenantContainer[count];
+ Object objInRoot = new Object();
+ TenantConfiguration config = new TenantConfiguration().limitHeap(32 * 1024 * 1024 /* 32 MB heap */);
+
+ assertNull(TenantContainer.containerOf(objInRoot));
+
+ for (int i = 0; i < count; ++i) {
+ tenants[i] = TenantContainer.create(config);
+ final int idx = i;
+ final TenantContainer thisContainer = tenants[i];
+ thisContainer.run(() -> {
+ objects[idx] = new Object();
+
+ TenantContainer current = TenantContainer.current();
+
+ assertNotNull(current);
+ assertTrue(current == thisContainer);
+ assertNotNull(TenantContainer.containerOf(objects[idx]));
+ });
+ }
+
+ for (int i = 0; i < count; ++i) {
+ TenantContainer containerGet = TenantContainer.containerOf(objects[i]);
+ assertNotNull(containerGet);
+ long idGet = containerGet.getTenantId();
+ long idCur = tenants[i].getTenantId();
+ assertEquals(idGet, idCur);
+ assertTrue(tenants[i] == containerGet);
+ }
+
+ Arrays.stream(tenants).forEach(t -> t.destroy());
+
+ Arrays.stream(objects).forEach(
+ obj -> assertNull(TenantContainer.containerOf(obj), "Should be owned by ROOT tenant"));
+
+ System.out.println("< {
+ assertTrue(TenantContainer.current() == tenant);
+ Object obj = TenantContainer.primitiveRunInRoot(()->{
+ //should be in root tenant.
+ assertTrue(TenantContainer.current() == null);
+ return new Object();
+ });
+ //obj should be allocated in root tenant.
+ assertTrue(TenantContainer.containerOf(obj) == null);
+ });
+ }
+}
diff --git a/test/multi-tenant/TestHeapIsolation.java b/test/multi-tenant/TestHeapIsolation.java
new file mode 100644
index 0000000000000000000000000000000000000000..deecf6f8058181ae80a86ab76ac9b22d863f4c01
--- /dev/null
+++ b/test/multi-tenant/TestHeapIsolation.java
@@ -0,0 +1,488 @@
+
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test isolation of per-tenant Java heap space
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestHeapIsolation
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm/timeout=600 -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+MultiTenant -XX:+TenantHeapIsolation -XX:+WhiteBoxAPI -XX:+UseG1GC -Xmx2048M -Xms1024M -XX:G1HeapRegionSize=1M TestHeapIsolation
+ *
+ */
+
+import static com.oracle.java.testlibrary.Asserts.*;
+
+import com.alibaba.tenant.TenantConfiguration;
+import com.alibaba.tenant.TenantContainer;
+import com.alibaba.tenant.TenantException;
+import sun.hotspot.WhiteBox;
+import java.lang.reflect.Field;
+import java.util.Arrays;
+import java.util.stream.IntStream;
+
+public class TestHeapIsolation {
+
+ // GC operation types, used to trigger GC operation during testing
+ enum GCType {
+ NO_GC,
+ YOUNG_GC,
+ FULL_GC,
+ SYSTEM_GC
+ }
+
+ // Allocation operation type of objects
+ enum AllocType {
+ AT_HUMONGOUS,
+ AT_TINY,
+ AT_MIXED
+ }
+
+ private static final WhiteBox wb;
+
+ // Size of single G1 heap region
+ private static final int HEAP_REGION_SIZE;
+
+ // to access com.alibaba.tenant.TenantContainer.allocationContext
+ private static Field allocationContextField;
+
+ private static boolean verbose = "true".equalsIgnoreCase(System.getProperty("TestHeapIsolationVerbose"));
+
+ static {
+ wb = WhiteBox.getWhiteBox();
+
+ HEAP_REGION_SIZE = wb.g1RegionSize();
+
+ // provide accessor to TenantContainer.allocationContext
+ try {
+ allocationContextField = TenantContainer.class.getDeclaredField("allocationContext");
+ allocationContextField.setAccessible(true);
+ } catch (NoSuchFieldException e) {
+ e.printStackTrace();
+ }
+ }
+
+ public static void main(String args[]) throws Exception {
+ new TestHeapIsolation().runAllTests();
+ }
+
+ void runAllTests() throws Exception {
+ assertTrue(HEAP_REGION_SIZE > 0);
+
+ testEmptyTenantOperation();
+ testTenantOccupiedMemory();
+
+ // test with single tenant for sanity
+ testTenantAllocation(1, GCType.NO_GC, AllocType.AT_TINY);
+ testTenantAllocation(1, GCType.YOUNG_GC, AllocType.AT_TINY);
+ testTenantAllocation(1, GCType.FULL_GC, AllocType.AT_TINY);
+ testTenantAllocation(1, GCType.SYSTEM_GC, AllocType.AT_TINY);
+
+ // test with two tenants
+ testTenantAllocation(2, GCType.NO_GC, AllocType.AT_TINY);
+ testTenantAllocation(2, GCType.YOUNG_GC, AllocType.AT_TINY);
+ testTenantAllocation(2, GCType.FULL_GC, AllocType.AT_TINY);
+ testTenantAllocation(2, GCType.SYSTEM_GC, AllocType.AT_TINY);
+
+ // test with multiple tenants, a little pressure
+ testTenantAllocation(128, GCType.NO_GC, AllocType.AT_TINY);
+ testTenantAllocation(128, GCType.YOUNG_GC, AllocType.AT_TINY);
+ testTenantAllocation(128, GCType.FULL_GC, AllocType.AT_TINY);
+ testTenantAllocation(128, GCType.SYSTEM_GC, AllocType.AT_TINY);
+
+ // test with mixed allocation
+ testTenantAllocation(32, GCType.NO_GC, AllocType.AT_MIXED);
+ testTenantAllocation(32, GCType.YOUNG_GC, AllocType.AT_MIXED);
+ testTenantAllocation(32, GCType.FULL_GC, AllocType.AT_MIXED);
+ testTenantAllocation(32, GCType.SYSTEM_GC, AllocType.AT_MIXED);
+ }
+
+ /*
+ *
+ * The test tries to enforce:
+ * 1, Object allocation should happen in the correct tenant container
+ * 2, Object copy operations during GC should not violate tenant container boundary
+ *
+ * @param count Create {@code count} non-root tenant containers in the test
+ * @param gcType GC type to trigger during tenant allocations
+ * @param allocType Size class of the allocated objects
+ */
+ void testTenantAllocation(int count, GCType gcType, AllocType allocType) throws Exception {
+ System.out.println(">> Begin TEST testTenantAllocation ("
+ + count + " containers, gcType = " + gcType.name() + ", allocType=" + allocType.name());
+
+ assertTrue(count > 0, "Cannot test with " + count + " tenants");
+
+ // local array holds objects to prevent them from being potentially reclaimed by GC,
+ // NOTE: there are cross-tenant references in this array
+ Object[] objHolder = new Object[count];
+ Object[] objHolder2 = new Object[count];
+
+ final Object objRootBefore = allocateObject(allocType);
+ TenantContainer curTenant = TenantContainer.containerOf(objRootBefore);
+ assertNull(curTenant, "Object was allocated in wrong container " + allocationContextStringOf(curTenant));
+
+ // each tenant container in this test is limited to a 32MB heap
+ TenantConfiguration config = new TenantConfiguration().limitHeap(32 * 1024 * 1024 /* 32MB heap */);
+
+ // to prevent TenantContainer from being GCed
+ TenantContainer[] tenants = new TenantContainer[count];
+
+ // if there are more than 8 tenant containers, run them in multiple threads
+ int threadRunLimit = 8;
+ if (count > threadRunLimit) {
+ int taskPerThread = count / threadRunLimit;
+ Thread[] thread = new Thread[threadRunLimit];
+
+ for (int i = 0; i < threadRunLimit; ++i) {
+ final int thrdIndex = i;
+
+ thread[i] = new Thread() {
+
+ public void run() {
+ for (int j = 0; j < taskPerThread; ++j) {
+ // index in the tenant container array and the obj holder arrays
+ final int idx = thrdIndex * taskPerThread + j;
+
+ // quit loop if we exceeded limit of 'count'
+ if (idx >= count) break;
+
+ tenants[idx] = TenantContainer.create(config);
+ try {
+ tenants[idx].run(() -> {
+ objHolder[idx] = allocateObject(allocType);
+
+ assert(null != TenantContainer.containerOf(objHolder[idx]));
+ assert(tenants[idx] == TenantContainer.containerOf(objHolder[idx]));
+
+ IntStream.range(0, objHolder.length).forEach(x -> verbose_cr("objHolder[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder[x]))));
+ IntStream.range(0, objHolder2.length).forEach(x -> verbose_cr("objHolder2[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder2[x]))));
+
+ triggerGC(gcType);
+
+ IntStream.range(0, objHolder.length).forEach(x -> verbose_cr("objHolder[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder[x]))));
+ IntStream.range(0, objHolder2.length).forEach(x -> verbose_cr("objHolder2[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder2[x]))));
+
+ objHolder2[idx] = allocateObject(allocType);
+ assert(null != TenantContainer.containerOf(objHolder2[idx]));
+ assert(tenants[idx] == TenantContainer.containerOf(objHolder2[idx]));
+
+ assertEquals(tenants[idx], TenantContainer.containerOf(objHolder[idx]));
+ assertEquals(tenants[idx], TenantContainer.containerOf(objHolder2[idx]));
+ assertInSameContainer(objHolder[idx], objHolder2[idx]);
+ });
+ } catch (TenantException e) {
+ e.printStackTrace();
+ }
+ }
+ }
+ };
+ thread[i].start();
+ }
+
+ for (int i = 0; i < threadRunLimit; ++i) {
+ thread[i].join();
+ }
+ } else {
+ for (int i = 0; i < count; ++i) {
+ tenants[i] = TenantContainer.create(config);
+ final int idx = i;
+ tenants[i].run(() -> {
+ objHolder[idx] = allocateObject(allocType);
+
+ IntStream.range(0, objHolder.length).forEach(x -> verbose_cr("objHolder[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder[x]))));
+ IntStream.range(0, objHolder2.length).forEach(x -> verbose_cr("objHolder2[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder2[x]))));
+
+ triggerGC(gcType);
+
+ IntStream.range(0, objHolder.length).forEach(x -> verbose_cr("objHolder[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder[x]))));
+ IntStream.range(0, objHolder2.length).forEach(x -> verbose_cr("objHolder2[" + idx + "]@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder2[x]))));
+
+ objHolder2[idx] = allocateObject(allocType);
+ assert(null != TenantContainer.containerOf(objHolder2[idx]));
+ assert(tenants[idx] == TenantContainer.containerOf(objHolder2[idx]));
+
+ assertEquals(tenants[idx], TenantContainer.containerOf(objHolder[idx]));
+ assertEquals(tenants[idx], TenantContainer.containerOf(objHolder2[idx]));
+ assertInSameContainer(objHolder[idx], objHolder2[idx]);
+ });
+ }
+ }
+
+ // perform gc after tenant creation
+ IntStream.range(0, objHolder.length).forEach(x -> verbose_cr("objHolder@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder[x]))));
+ IntStream.range(0, objHolder2.length).forEach(x -> verbose_cr("objHolder2@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder2[x]))));
+
+ triggerGC(gcType);
+
+ IntStream.range(0, objHolder.length).forEach(x -> verbose_cr("objHolder@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder[x]))));
+ IntStream.range(0, objHolder2.length).forEach(x -> verbose_cr("objHolder2@0x"
+ + Long.toHexString(wb.getObjectAddress(objHolder2[x]))));
+
+ // allocate object in root tenant after execution of tenant code
+ Object objRootAfter = allocateObject(allocType);
+
+ curTenant = TenantContainer.containerOf(objRootBefore);
+ assertNull(curTenant, "Object was copied to wrong container " + allocationContextStringOf(curTenant));
+ curTenant = TenantContainer.containerOf(objRootAfter);
+ assertNull(curTenant, "Object was copied to wrong container " + allocationContextStringOf(curTenant));
+
+ for (int i = 0; i < count; ++i) {
+ assertNotNull(objHolder[i]);
+ assertNotNull(objHolder2[i]);
+ assertInSameContainer(objHolder[i], objHolder2[i]);
+ curTenant = TenantContainer.containerOf(objHolder[i]);
+ assertNotNull(curTenant, "Object["+i+"] @0x" + Long.toHexString(wb.getObjectAddress(objHolder[i]))+
+ " was copied to wrong container " + allocationContextStringOf(curTenant));
+ curTenant = TenantContainer.containerOf(objHolder2[i]);
+ assertNotNull(curTenant, "Object was copied to wrong container " + allocationContextStringOf(curTenant));
+
+ for (int j = 0; j < count; ++j) {
+ if (i != j) {
+ assertNotEquals(TenantContainer.containerOf(objHolder[i]), TenantContainer.containerOf(objHolder[j]));
+ assertNotEquals(TenantContainer.containerOf(objHolder2[i]), TenantContainer.containerOf(objHolder2[j]));
+ assertNotEquals(TenantContainer.containerOf(objHolder[i]), TenantContainer.containerOf(objHolder2[j]));
+ assertNotEquals(TenantContainer.containerOf(objHolder2[i]), TenantContainer.containerOf(objHolder[j]));
+ }
+ }
+ }
+
+ // destroy all native objects
+ Arrays.stream(tenants).forEach(t -> t.destroy());
+
+ // after tenant destruction, all objects should belong to ROOT tenant
+ Arrays.stream(objHolder).forEach(
+ obj -> assertNull(TenantContainer.containerOf(obj), "Should be owned by root tenant"));
+ Arrays.stream(objHolder2).forEach(
+ obj -> assertNull(TenantContainer.containerOf(obj), "Should be owned by root tenant"));
+
+ System.out.println("<< End TEST testTenantAllocation");
+ }
+
+ // invoke GC on Java heap
+ private static void triggerGC(GCType gcType) {
+ switch (gcType) {
+ case FULL_GC:
+ wb.fullGC();
+ break;
+ case YOUNG_GC:
+ wb.youngGC();
+ break;
+ case SYSTEM_GC:
+ System.gc();
+ break;
+ case NO_GC:
+ default:
+ // nothing to do
+ }
+ }
+
+ void testTenantOccupiedMemory() {
+
+ // clean up heap space
+ System.gc();
+
+ TenantConfiguration config = new TenantConfiguration().limitHeap(64 * 1024 * 1024);
+ TenantContainer tenant = TenantContainer.create(config);
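+ // occupied memory is accounted in whole G1 regions; the assertions
+ // below all expect multiples of HEAP_REGION_SIZE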
+ assertTrue(0L == tenant.getOccupiedMemory());
+ try {
+ // something will be allocated anyway
+ tenant.run(()->{
+ assertEquals(0, tenant.getOccupiedMemory());
+ int i = 1;
+ assertEquals(0, tenant.getOccupiedMemory());
+ });
+ // detach() will allocate some garbage anyway, but still no cross-tenant references
+ assertEquals(HEAP_REGION_SIZE, tenant.getOccupiedMemory());
+
+ // slight allocation, occupied size should be 1 * RegionSize;
+ tenant.run(()->{
+ Object o = new byte[1];
+ });
+ assertEquals(HEAP_REGION_SIZE, tenant.getOccupiedMemory());
+
+ // allocate more objects to spill into a second region; occupied size should be 2 * region size
+ Object[] strongRefs = new Object[1];
+ tenant.run(()->{
+ for (int i = 0; i < 4; ++i) {
+ strongRefs[0] = new byte[HEAP_REGION_SIZE / 4];
+ }
+ });
+ assertEquals(HEAP_REGION_SIZE * 2, tenant.getOccupiedMemory());
+
+ // do full gc to release above newly created garbage
+ System.gc();
+ assertEquals(HEAP_REGION_SIZE, tenant.getOccupiedMemory());
+
+ strongRefs[0] = null;
+
+ System.gc();
+ assertEquals(0, tenant.getOccupiedMemory());
+
+ // scenario where a live region survives a full GC
+ Object refs[] = new Object[1];
+ tenant.run(()->{
+ refs[0] = new byte[1];
+ System.gc();
+ refs[0] = new byte[1];
+ });
+ assertEquals(HEAP_REGION_SIZE * 2, // 1 old, 1 eden
+ tenant.getOccupiedMemory());
+ System.gc(); // will still leave one region
+ assertEquals(HEAP_REGION_SIZE, // 1 old
+ tenant.getOccupiedMemory());
+ } catch (TenantException e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ /*
+ * Verify that memory isolation works correctly when no allocation
+ * happens inside a non-root tenant
+ */
+ void testEmptyTenantOperation() throws Exception {
+ System.out.println(">> Begin TEST testEmptyTenantOperation");
+
+ // each tenant container is limited to a 128MB heap
+ TenantConfiguration config = new TenantConfiguration().limitHeap(128 * 1024 * 1024 /* 128 MB heap */);
+ for (int i = 0; i < 16; ++i) {
+ TenantContainer tenant = TenantContainer.create(config);
+ tenant.run(() -> {
+ // Empty! and no tenant allocation happened
+ });
+ tenant.destroy();
+ }
+
+ System.gc();
+
+ System.out.println("<< End TEST testEmptyTenantOperation");
+ }
+
+
+ // allocate one Java object whose data occupies {@code size} bytes
+ private static Object allocateObjectOfSize(int size) {
+ if (size > 0) {
+ verbose("Allocate object of size:" + size);
+ Object newObj = new byte[size];
+
+ long ac = allocationContextOf(TenantContainer.current());
+ verbose_cr(", @0x" + Long.toHexString(wb.getObjectAddress(newObj)) + ", tenant["
+ + (ac == 0L ? "ROOT" : "0x" + Long.toHexString(ac)) + "]");
+ return newObj;
+ }
+ return null;
+ }
+
+ private static AllocType lastMixedAllocType = AllocType.AT_TINY;
+ private static Object allocateObject(AllocType allocType) {
+ // adjust allocation type
+ if (allocType == AllocType.AT_MIXED) {
+ allocType = lastMixedAllocType;
+ if (lastMixedAllocType == AllocType.AT_HUMONGOUS) {
+ lastMixedAllocType = AllocType.AT_TINY;
+ } else if (lastMixedAllocType == AllocType.AT_TINY) {
+ lastMixedAllocType = AllocType.AT_HUMONGOUS;
+ }
+ }
+
+ // do allocation
+ switch (allocType) {
+ case AT_HUMONGOUS: return allocateObjectOfSize(HEAP_REGION_SIZE << 2);
+ case AT_TINY: return allocateObjectOfSize(1);
+ }
+ return null;
+ }
+
+ // retrieve allocationContext field from a TenantContainer object via reflection
+ private static long allocationContextOf(TenantContainer tenant) {
+ if (tenant != null && allocationContextField != null) {
+ try {
+ return (Long)allocationContextField.get(tenant);
+ } catch (IllegalAccessException e) {
+ e.printStackTrace();
+ }
+ }
+ return 0L;
+ }
+
+ private static String allocationContextStringOf(TenantContainer tenant) {
+ if (tenant != null && allocationContextField != null) {
+ try {
+ return "0x" + Long.toHexString((Long)allocationContextField.get(tenant));
+ } catch (IllegalAccessException e) {
+ e.printStackTrace();
+ }
+ }
+ return "NULL";
+ }
+
+ private static void verbose(String msg) {
+ if (verbose) {
+ System.out.print(msg);
+ }
+ }
+ private static void verbose_cr(String msg) {
+ verbose(msg + "\n");
+ }
+
+ // Assertions
+ private static void assertEquals(TenantContainer c1, TenantContainer c2) {
+ if (c1 != c2) {
+ String msg = "TenantContainer equals failed: c1=" +
+ (c1 == null ? "NULL" : "0x" + Long.toHexString(allocationContextOf(c1)))
+ + ", c2=" +
+ (c2 == null ? "NULL" : "0x" + Long.toHexString(allocationContextOf(c2)));
+ throw new RuntimeException(msg);
+ }
+ }
+
+ private static void assertInSameContainer(Object o1, Object o2) {
+ TenantContainer t1, t2;
+ t1 = TenantContainer.containerOf(o1);
+ t2 = TenantContainer.containerOf(o2);
+ if (t1 != t2) {
+ String msg = "o1@0x" + Long.toHexString(wb.getObjectAddress(o1)) + " from t1(" +
+ (t1 == null ? "NULL" : "0x" + Long.toHexString(allocationContextOf(t1)))
+ + "), o2@0x" + Long.toHexString(wb.getObjectAddress(o2)) + "from t2(" +
+ (t2 == null ? "NULL" : "0x" + Long.toHexString(allocationContextOf(t2))) + ")";
+ throw new RuntimeException(msg);
+ }
+ }
+
+ private static void assertEquals(long expected, long actual) {
+ if (expected != actual) {
+ throw new RuntimeException("assertEquals failed: expected = " + expected + ", actual = " + actual);
+ }
+ }
+}
diff --git a/test/multi-tenant/TestMultiTenantOptionDeps.sh b/test/multi-tenant/TestMultiTenantOptionDeps.sh
new file mode 100644
index 0000000000000000000000000000000000000000..9a6673c6c984772ba5899afb2b6986ce26cca1bc
--- /dev/null
+++ b/test/multi-tenant/TestMultiTenantOptionDeps.sh
@@ -0,0 +1,75 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Alibaba designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+#
+# @test TestMultiTenantOptionDeps
+# @summary Test the dependencies of multi-tenant options
+# @run shell TestMultiTenantOptionDeps.sh
+#
+
+if [ "${TESTSRC}" = "" ]
+then
+ TESTSRC=${PWD}
+ echo "TESTSRC not set. Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+## Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../test_env.sh
+
+JAVA=${TESTJAVA}${FS}bin${FS}java
+
+set -x
+
+# if $FROM is enabled, $TO should be enabled automatically
+function check_dependency_bool_bool() {
+ FROM=$1
+ TO="$(echo $2 | sed 's/-XX:+//g')"
+ if [ -z "$(${JAVA} ${FROM} -XX:+PrintFlagsFinal -version 2>&1 | grep ${TO} | grep '= true')" ]; then
+ echo "check_dependency_bool_bool failed: $1 --> $2"
+ exit 1
+ fi
+}
+
+# if $FROM is enabled, $TO should remain disabled
+function check_dependency_bool_bool_false() {
+ FROM=$1
+ TO="$(echo $2 | sed 's/-XX:+//g')"
+ if [ -z "$(${JAVA} ${FROM} -XX:+PrintFlagsFinal -version 2>&1 | grep ${TO} | grep '= false')" ]; then
+ echo "check_dependency_bool_bool failed: $1 --> $2"
+ exit 1
+ fi
+}
+
+# check that the provided JVM arguments are rejected as invalid
+function assert_invalid_jvm_options() {
+ JVM_ARGS=$1
+ CMD="${JAVA} ${JVM_ARGS} -version"
+ OUT=$(${CMD} 2>&1)
+ if [ 0 -eq $? ]; then
+ echo "Expected invalid JVM arguments: ${JVM_ARGS}"
+ exit 1
+ fi
+}
+
+check_dependency_bool_bool '-XX:+UseG1GC -XX:+TenantHeapIsolation' '-XX:+MultiTenant'
+assert_invalid_jvm_options '-XX:+TenantHeapIsolation'
+assert_invalid_jvm_options '-XX:+TenantHeapIsolation -XX:+UseConcMarkSweepGC'
+assert_invalid_jvm_options '-XX:+UseG1GC -XX:+TenantHeapIsolation -XX:-MultiTenant'
diff --git a/test/multi-tenant/TestParGCAllocatorLeak.sh b/test/multi-tenant/TestParGCAllocatorLeak.sh
new file mode 100644
index 0000000000000000000000000000000000000000..e939a20023c8a2aab4903c1709aac8daf1bb9422
--- /dev/null
+++ b/test/multi-tenant/TestParGCAllocatorLeak.sh
@@ -0,0 +1,138 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+#
+# This code is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License version 2 only, as
+# published by the Free Software Foundation. Alibaba designates this
+# particular file as subject to the "Classpath" exception as provided
+# by Oracle in the LICENSE file that accompanied this code.
+#
+# This code is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# version 2 for more details (a copy is included in the LICENSE file that
+# accompanied this code).
+#
+# You should have received a copy of the GNU General Public License version
+# 2 along with this work; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+
+#
+# @test TestParGCAllocatorLeak.sh
+# @summary test memory leak of G1ParGCAllocator
+# @run shell TestParGCAllocatorLeak.sh
+#
+
+if [ "${TESTSRC}" = "" ]
+then
+ TESTSRC=${PWD}
+ echo "TESTSRC not set. Using "${TESTSRC}" as default"
+fi
+echo "TESTSRC=${TESTSRC}"
+# Adding common setup Variables for running shell tests.
+. ${TESTSRC}/../test_env.sh
+
+JAVA=${TESTJAVA}${FS}bin${FS}java
+JAVAC=${TESTJAVA}${FS}bin${FS}javac
+JCMD=${TESTJAVA}${FS}bin${FS}jcmd
+TEST_CLASS=TestLeak
+TEST_SRC=$TEST_CLASS.java
+
+###################################################################################
+cat > $TEST_SRC << EOF
+import com.alibaba.tenant.*;
+import java.util.*;
+
+class $TEST_CLASS {
+ public static void main(String[] args) {
+ int nofCpus = Runtime.getRuntime().availableProcessors();
+ Thread[] threads = new Thread[nofCpus];
+ for (int i = 0; i < nofCpus; ++i) {
+ threads[i] = new Thread(()->{
+ TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(32 * 1024 * 1024));
+ try {
+ tenant.run(()->{
+ while (true) {
+ Object o = new byte[1024];
+ }
+ });
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+ threads[i].start();
+ }
+
+ Arrays.stream(threads).forEach(t->{
+ try {
+ t.join();
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+}
+EOF
+
+# Do compilation
+${JAVAC} ${TEST_SRC}
+if [ $? != '0' ]
+then
+ echo "Failed to compile ${TEST_SRC}"
+ exit 1
+fi
+
+set -x
+
+${JAVA} -cp . -XX:+UseG1GC -XX:+MultiTenant -XX:+TenantHeapIsolation -XX:NativeMemoryTracking=detail -XX:+PrintGCDetails -Xloggc:gc.log -Xmx1g -Xmn32m ${TEST_CLASS} > ${TEST_CLASS}.log 2>&1 &
+sleep 5
+PID=$(ps ax | grep ${TEST_CLASS} | grep -v grep | awk '{print $1}')
+if [ -z "$PID" ] || [ "$(echo $PID | wc -w)" -gt 1 ] ; then
+ echo "BAD pid!"
+ exit 1
+fi
+
+# set up baseline
+$JCMD $PID VM.native_memory baseline
+
+# sleep 30s
+sleep 30
+
+# check NMT differences for the sections below
+NMT_SECTIONS=("Internal" "Tenant")
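+# summary.diff prints a line like "(malloc=408KB +8KB #2k)" for each section;
+# the checks below take its second field, i.e. the signed KB delta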
+
+for MEM_SEC in ${NMT_SECTIONS[*]}; do
+ DIFF=$($JCMD $PID VM.native_memory summary.diff | grep -A3 ${MEM_SEC} | grep malloc | grep -v grep)
+ if [ ! -z "$(echo $DIFF | grep +)" ] && [ -z "$(echo $DIFF | awk '{print $2}' | grep \#)" ]; then
+ DIFF=$(echo $DIFF | awk '{print $2}')
+ echo "DIFF=$DIFF"
+ if [ ! -z "$(echo $DIFF | grep KB)" ]; then
+ # only check the result if the diff is reported in KB (i.e. at least 1KB)
+ DIFF_V="$(echo $DIFF | sed -e 's/KB//g' -e 's/+//g' -e 's/-//g')"
+ if [ -z "$DIFF_V" ]; then
+ echo "Bad diff value $DIFF_V"
+ kill -9 $PID
+ exit 1
+ fi
+ if [ $DIFF_V -gt 1024 ]; then
+ echo "Diff value is great than 1 MB, maybe leaking!!"
+ kill -9 $PID
+ exit 1
+ fi
+ fi
+
+ # a diff reported in MB or GB scale is definitely too large
+ if [ ! -z "$(echo $DIFF | grep MB)" ] || [ ! -z "$(echo $DIFF | grep GB)" ]; then
+ echo "Diff is in MB or GB scale, maybe leaking!!"
+ kill -9 $PID
+ exit 1
+ fi
+ else
+ echo "No significant memory size changed, skipping"
+ fi
+done
+
+kill -9 $PID
diff --git a/test/multi-tenant/TestPerTenantTLAB.java b/test/multi-tenant/TestPerTenantTLAB.java
new file mode 100644
index 0000000000000000000000000000000000000000..7285c5c3665871dc51dd85205a1ed1c8dc5697a5
--- /dev/null
+++ b/test/multi-tenant/TestPerTenantTLAB.java
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2020 Alibaba Group Holding Limited. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation. Alibaba designates this
+ * particular file as subject to the "Classpath" exception as provided
+ * by Oracle in the LICENSE file that accompanied this code.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+/*
+ * @test
+ * @summary Test retain and reuse of TLAB
+ * @library /testlibrary /testlibrary/whitebox
+ * @build TestPerTenantTLAB
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:+UsePerTenantTLAB -XX:+TenantHeapIsolation -XX:+UseG1GC -XX:+UseTLAB -XX:TLABSize=65535 -Xmx1024M -Xms512M -XX:G1HeapRegionSize=1M TestPerTenantTLAB
+ *
+ */
+
+import static com.oracle.java.testlibrary.Asserts.*;
+
+import com.alibaba.tenant.TenantConfiguration;
+import com.alibaba.tenant.TenantContainer;
+import com.alibaba.tenant.TenantException;
+import com.alibaba.tenant.TenantState;
+import sun.hotspot.WhiteBox;
+
+import java.util.concurrent.CountDownLatch;
+
+public class TestPerTenantTLAB {
+
+ private static final WhiteBox WB = WhiteBox.getWhiteBox();
+
+ private static final int G1_HEAP_REGION_SIZE = WB.g1RegionSize();
+
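+ // region size is a power of two, so this mask clears the in-region offset
+ // bits of an address, leaving the region base for same-region comparisons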
+ private static final int G1_HEAP_REGION_MASK = (0xFFFFFFFF << Integer.numberOfTrailingZeros(G1_HEAP_REGION_SIZE));
+
+ // non-adaptive TLAB size, see -XX:TLABSize from command line options
+ private static final int TLAB_SIZE = 65535;
+
+ private void testRetainReuseTLABBasic() {
+ TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(64 * 1024 * 1024));
+ Object[] refs = new Object[16];
+
+ WB.fullGC();
+ // after full GC, below allocation request will start from the beginning (almost) of new EDEN regions.
+
+ try {
+ refs[0] = new Object();
+ refs[1] = new Object();
+ assertInCurrentTLAB(refs[0], refs[1]);
+
+ tenant.run(()->{
+ refs[2] = new Object();
+ refs[3] = new Object();
+ assertInCurrentTLAB(refs[2], refs[3]);
+ assertNotInCurrentTLAB(refs[0], refs[1]);
+ });
+
+ assertNotInCurrentTLAB(refs[2], refs[3]);
+ assertInCurrentTLAB(refs[0], refs[1]);
+ assertNotInSameRegion(refs[1], refs[2]);
+
+ refs[4] = new Object();
+ refs[5] = new Object();
+
+ assertInCurrentTLAB(refs[4], refs[5]);
+ assertNotInSameRegion(refs[3], refs[4]);
+ assertInCurrentTLAB(refs[0], refs[1], refs[4], refs[5]);
+
+ tenant.run(()->{
+ refs[6] = new Object();
+ refs[7] = new Object();
+ assertInCurrentTLAB(refs[2], refs[3], refs[6], refs[7]);
+ assertNotInSameRegion(refs[4], refs[7]);
+ assertNotInSameRegion(refs[5], refs[6]);
+ });
+
+ refs[8] = new Object();
+ refs[9] = new Object();
+ assertInCurrentTLAB(refs[0], refs[1], refs[4], refs[5], refs[8], refs[9]);
+ } catch (TenantException e) {
+ throw new RuntimeException(e);
+ } finally {
+ tenant.destroy();
+ }
+ }
+
+ private void testChildThread() {
+ TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(64 * 1024 * 1024));
+ Object[] refs = new Object[16];
+
+ WB.fullGC();
+ // after full GC, below allocation request will start from the beginning (almost) of new EDEN regions.
+
+ try {
+ tenant.run(()-> {
+ refs[0] = new Object();
+ refs[1] = new Object();
+ assertInCurrentTLAB(refs[0], refs[1]);
+
+ Thread t = new Thread(()->{
+ refs[2] = new Object();
+ refs[3] = new Object();
+ assertInCurrentTLAB(refs[2], refs[3]);
+ assertNotInCurrentTLAB(refs[0], refs[1]);
+
+ TenantContainer.primitiveRunInRoot(()-> {
+ refs[4] = new Object();
+ refs[5] = new Object();
+ assertInCurrentTLAB(refs[4], refs[5]);
+ assertNotInSameRegion(refs[2], refs[5]);
+ });
+ });
+
+ // wait for the child thread to end
+ t.start();
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ }
+
+ // newly allocated ROOT objects should be in the same TLAB as previous ROOT objects
+ TenantContainer.primitiveRunInRoot(()->{
+ refs[6] = new Object();
+ refs[7] = new Object();
+ assertInCurrentTLAB(refs[6], refs[7]);
+ assertInSameRegion(refs[4], refs[6]);
+ assertInSameRegion(refs[4], refs[7]);
+ });
+ });
+ } catch (TenantException e) {
+ throw new RuntimeException(e);
+ }
+
+ refs[8] = new Object();
+ refs[9] = new Object();
+ assertInCurrentTLAB(refs[8], refs[9], refs[6], refs[7]);
+
+ Thread t = new Thread(()->{
+ refs[10] = new Object();
+ refs[11] = new Object();
+ assertInCurrentTLAB(refs[10], refs[11]);
+ assertNotInCurrentTLAB(refs[8], refs[9]);
+ assertInSameRegion(refs[8], refs[10]);
+ assertInSameRegion(refs[4], refs[11]);
+ assertInSameRegion(refs[5], refs[11]);
+ assertInSameRegion(refs[6], refs[11]);
+ });
+ t.start();
+ try {
+ t.join();
+ } catch (InterruptedException e) {
+ throw new RuntimeException(e);
+ } finally {
+ tenant.destroy();
+ }
+ }
+
+ private void testAfterDestroyTenant() {
+ TenantContainer tenant = TenantContainer.create(new TenantConfiguration().limitHeap(64 * 1024 * 1024));
+ Object[] refs = new Object[2];
+ CountDownLatch cdl = new CountDownLatch(1);
+ CountDownLatch started = new CountDownLatch(1);
+ assertInCurrentTLAB(refs, cdl);
+
+ System.gc();
+ Thread thread = new Thread(()->{
+ try {
+ tenant.run(()->{
+ refs[0] = new Object();
+ assertTrue(TenantContainer.containerOf(refs[0]) == tenant);
+ assertTrue(WB.isInCurrentTLAB(refs[0]));
+
+ started.countDown();
+
+ // attach and hold
+ try {
+ cdl.await();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ });
+ } catch (TenantException e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ });
+ thread.start();
+
+ try {
+ started.await();
+
+ assertTrue(TenantContainer.containerOf(refs[0]) == tenant);
+
+ tenant.destroy();
+ assertTrue(tenant.getState() == TenantState.DEAD);
+
+ // should have been moved to root
+ assertNull(TenantContainer.containerOf(refs[0]));
+
+ // release the latch so the tenant thread can finish
+ cdl.countDown();
+
+ thread.join();
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ throw new RuntimeException(e);
+ }
+ }
+
+ public static void main(String[] args) {
+ TestPerTenantTLAB test = new TestPerTenantTLAB();
+ test.testRetainReuseTLABBasic();
+ test.testChildThread();
+ test.testAfterDestroyTenant();
+ }
+
+ private static void assertInCurrentTLAB(Object...objs) {
+ for (Object o : objs) {
+ assertTrue(WB.isInCurrentTLAB(o));
+ }
+ }
+
+ private static void assertNotInSameTLAB(Object o1, Object o2) {
+ // compare as long to avoid truncating the 64-bit address difference
+ assertGreaterThanOrEqual(Math.abs(WB.getObjectAddress(o1) - WB.getObjectAddress(o2)), (long)TLAB_SIZE);
+ }
+
+ private static void assertNotInCurrentTLAB(Object... objs) {
+ for (Object o : objs) {
+ assertFalse(WB.isInCurrentTLAB(o));
+ }
+ }
+
+ private static void assertNotInSameRegion(Object o1, Object o2) {
+ int addr1 = (int)WB.getObjectAddress(o1) & G1_HEAP_REGION_MASK;
+ int addr2 = (int)WB.getObjectAddress(o2) & G1_HEAP_REGION_MASK;
+ assertNotEquals(addr1, addr2);
+ }
+
+ private static void assertInSameRegion(Object o1, Object o2) {
+ int addr1 = (int)WB.getObjectAddress(o1) & G1_HEAP_REGION_MASK;
+ int addr2 = (int)WB.getObjectAddress(o2) & G1_HEAP_REGION_MASK;
+ assertEquals(addr1, addr2);
+ }
+}
diff --git a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
index ecaf83afb09f50876b234c83a9d9164b6a909eb3..e4c4ab45a73e9f3c7e0c54e684c4f01e90578dcc 100644
--- a/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
+++ b/test/testlibrary/whitebox/sun/hotspot/WhiteBox.java
@@ -267,4 +267,7 @@ public class WhiteBox {
public native boolean isContainerized();
public native void printOsInfo();
+ // TLAB
+ public native boolean isInCurrentTLAB(Object obj);
+
}