From d18497f1bd26d54b96b179ee5e690a27bce3abad Mon Sep 17 00:00:00 2001
From: tschatzl
Date: Mon, 24 Mar 2014 15:31:00 +0100
Subject: [PATCH] 8036860: Pad and cache-align the BiasedMappedArray

Summary: Pad and cache-align BiasedMappedArray instances by default to avoid
performance variability problems due to false sharing, as instances of this
data structure are typically used in performance-sensitive code.
Reviewed-by: brutisso, stefank
---
 src/share/vm/gc_implementation/g1/g1BiasedArray.cpp |  8 ++++++++
 src/share/vm/gc_implementation/g1/g1BiasedArray.hpp |  8 ++------
 src/share/vm/memory/padded.hpp                      |  8 ++++++++
 src/share/vm/memory/padded.inline.hpp               | 10 ++++++++++
 4 files changed, 28 insertions(+), 6 deletions(-)

diff --git a/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp
index 7f5023b42..d5851a6d4 100644
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp
@@ -24,6 +24,14 @@
 
 #include "precompiled.hpp"
 #include "gc_implementation/g1/g1BiasedArray.hpp"
+#include "memory/padded.inline.hpp"
+
+// Allocate a new array, generic version.
+address G1BiasedMappedArrayBase::create_new_base_array(size_t length, size_t elem_size) {
+  assert(length > 0, "just checking");
+  assert(elem_size > 0, "just checking");
+  return PaddedPrimitiveArray<u_char, mtGC>::create_unfreeable(length * elem_size);
+}
 
 #ifndef PRODUCT
 void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
diff --git a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
index f80c70b4e..92b1e2782 100644
--- a/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
+++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp
@@ -25,8 +25,8 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
 
+#include "memory/allocation.hpp"
 #include "utilities/debug.hpp"
-#include "memory/allocation.inline.hpp"
 
 // Implements the common base functionality for arrays that contain provisions
 // for accessing its elements using a biased index.
@@ -48,11 +48,7 @@ protected:
     _bias(0), _shift_by(0) { }
 
   // Allocate a new array, generic version.
-  static address create_new_base_array(size_t length, size_t elem_size) {
-    assert(length > 0, "just checking");
-    assert(elem_size > 0, "just checking");
-    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
-  }
+  static address create_new_base_array(size_t length, size_t elem_size);
 
   // Initialize the members of this class. The biased start address of this array
   // is the bias (in elements) multiplied by the element size.
diff --git a/src/share/vm/memory/padded.hpp b/src/share/vm/memory/padded.hpp
index a03c2ba56..9ddd14f85 100644
--- a/src/share/vm/memory/padded.hpp
+++ b/src/share/vm/memory/padded.hpp
@@ -101,4 +101,12 @@ class Padded2DArray {
   static T** create_unfreeable(uint rows, uint columns, size_t* allocation_size = NULL);
 };
 
+// Helper class to create an array of T objects. The array as a whole will
+// start at a multiple of alignment and its size will be aligned to alignment.
+template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_CACHE_LINE_SIZE>
+class PaddedPrimitiveArray {
+ public:
+  static T* create_unfreeable(size_t length);
+};
+
 #endif // SHARE_VM_MEMORY_PADDED_HPP
diff --git a/src/share/vm/memory/padded.inline.hpp b/src/share/vm/memory/padded.inline.hpp
index e773c4075..1e4f88584 100644
--- a/src/share/vm/memory/padded.inline.hpp
+++ b/src/share/vm/memory/padded.inline.hpp
@@ -76,3 +76,13 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
 
   return result;
 }
+
+template <class T, MEMFLAGS flags, size_t alignment>
+T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
+  // Allocate a chunk of memory large enough to allow for some alignment.
+  void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);
+
+  memset(chunk, 0, length * sizeof(T) + alignment);
+
+  return (T*)align_pointer_up(chunk, alignment);
+}
-- 
GitLab
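For readers outside HotSpot, the over-allocate-and-round-up trick that
create_unfreeable relies on can be sketched in plain C++. This is a minimal
standalone sketch under stated assumptions, not the patched code:
kCacheLineSize stands in for HotSpot's platform-dependent
DEFAULT_CACHE_LINE_SIZE, malloc replaces the NMT-tracked AllocateHeap, and the
bit-masking expression performs the same rounding as HotSpot's
align_pointer_up.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Stand-in for HotSpot's platform-dependent DEFAULT_CACHE_LINE_SIZE.
static const size_t kCacheLineSize = 64;

// Over-allocate by `alignment` bytes, zero the whole chunk, then round the
// returned pointer up to the next multiple of `alignment` (alignment must be
// a power of two). The result is "unfreeable": the adjusted pointer cannot
// be handed back to free().
template <class T>
static T* create_unfreeable(size_t length, size_t alignment = kCacheLineSize) {
  size_t bytes = length * sizeof(T) + alignment;
  void* chunk = malloc(bytes);
  if (chunk == NULL) return NULL;
  memset(chunk, 0, bytes);
  uintptr_t p = (uintptr_t)chunk;
  return (T*)((p + alignment - 1) & ~(uintptr_t)(alignment - 1));
}

int main() {
  // Two arrays allocated back to back still start on distinct cache lines,
  // which is what prevents false sharing between them.
  unsigned char* a = create_unfreeable<unsigned char>(100);
  unsigned char* b = create_unfreeable<unsigned char>(100);
  printf("a %% 64 = %zu, b %% 64 = %zu\n",
         (size_t)((uintptr_t)a % kCacheLineSize),
         (size_t)((uintptr_t)b % kCacheLineSize));
  return 0;
}

The cost of the technique is at most `alignment` wasted bytes per array,
which is cheap insurance against two unrelated, frequently written arrays
landing on the same cache line.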