Commit f7e1fdd3 authored by Heikki Linnakangas

Remove unused Slab memory pool and Slab cache stuff

Parent 80305df7
......@@ -167,18 +167,12 @@ add_library(gpos
src/memory/CMemoryPoolInjectFault.cpp
include/gpos/memory/CMemoryPoolManager.h
src/memory/CMemoryPoolManager.cpp
- include/gpos/memory/CMemoryPoolSlab.h
- src/memory/CMemoryPoolSlab.cpp
include/gpos/memory/CMemoryPoolStack.h
src/memory/CMemoryPoolStack.cpp
include/gpos/memory/CMemoryPoolTracker.h
src/memory/CMemoryPoolTracker.cpp
include/gpos/memory/CMemoryVisitorPrint.h
src/memory/CMemoryVisitorPrint.cpp
- include/gpos/memory/CSlab.h
- src/memory/CSlab.cpp
- include/gpos/memory/CSlabCache.h
- src/memory/CSlabCache.cpp
include/gpos/memory/IMemoryPool.h
src/memory/IMemoryPool.cpp
include/gpos/net/netutils.h
......
......@@ -42,8 +42,7 @@ namespace gpos
enum EAllocType
{
EatTracker,
- EatStack,
- EatSlab
+ EatStack
};
private:
......
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2011 Greenplum Inc.
//
// @filename:
// CMemoryPoolSlab.h
//
// @doc:
// Memory pool that implements a slab allocator.
//
// @owner:
//
// @test:
//
//---------------------------------------------------------------------------
#ifndef GPOS_CMemoryPoolSlab_H
#define GPOS_CMemoryPoolSlab_H
#include "gpos/base.h"
#include "gpos/utils.h"
#include "gpos/memory/CMemoryPool.h"
#include "gpos/memory/CSlabCache.h"
#define GPOS_MEM_SLAB_CACHE_GROUPS (14)
#define GPOS_MEM_SLAB_CACHES_PER_GROUP (1 << 2) // must be power of 2
namespace gpos
{
//---------------------------------------------------------------------------
// @class:
// CMemoryPoolSlab
//
// @doc:
//
// Slab allocator;
// contains caches of equally-sized slabs;
// each slab is broken into memory slots that are used for allocations;
//
//---------------------------------------------------------------------------
class CMemoryPoolSlab : public CMemoryPool
{
private:
//---------------------------------------------------------------------------
// @class:
// SAllocXLHeader
//
// @doc:
//
// Header for XL allocations
//
//---------------------------------------------------------------------------
struct SAllocXLHeader
{
// ID
ULONG_PTR m_ulpId;
// allocation size
ULONG m_ulAllocSize;
// link for hashtable
SLink m_link;
// allocation type; set to XL allocation;
// use ULONG_PTR for alignment;
ULONG_PTR ulpSlabAlloc;
// invalid ID
static
ULONG_PTR m_ulpIdInvalid;
// get header from allocation
static
SAllocXLHeader *Pallocxl(void *pv)
{
SAllocXLHeader *pallocxl = static_cast<SAllocXLHeader*>(pv) - 1;
GPOS_ASSERT(CSlab::EatXLAlloc == pallocxl->ulpSlabAlloc);
return pallocxl;
}
};
typedef CSyncHashtable<SAllocXLHeader, ULONG_PTR, CSpinlockOS>
AllocXLHashTable;
typedef CSyncHashtableAccessByKey<SAllocXLHeader, ULONG_PTR, CSpinlockOS>
AllocXLKeyAccessor;
typedef CSyncHashtableIter<SAllocXLHeader, ULONG_PTR, CSpinlockOS>
AllocXLIter;
typedef CSyncHashtableAccessByIter<SAllocXLHeader, ULONG_PTR, CSpinlockOS>
AllocXLIterAccessor;
// array of cache groups
CSlabCache *m_rgrgpsc[GPOS_MEM_SLAB_CACHE_GROUPS][GPOS_MEM_SLAB_CACHES_PER_GROUP];
// index of last used cache per group
ULONG_PTR m_rgulpCacheIdx[GPOS_MEM_SLAB_CACHE_GROUPS];
// hash table of XL allocations
AllocXLHashTable *m_pshtAllocXL;
// size of reserved memory;
// this includes total allocated memory and pending allocations;
volatile ULONG_PTR m_ulpReserved;
// max memory to allow in the pool;
// if equal to ULLONG_MAX, checks for exceeding max memory are bypassed
const ULLONG m_ullCapacity;
// initialize slab caches
void InitCaches();
// find cache corresponding to allocation size
CSlabCache *Psc(ULONG ulAlloc);
// allocate XL object
void *PvAllocateXL(const ULONG ulBytes, const CHAR *szFile, const ULONG ulLine);
// release XL object
void FreeXL(SAllocXLHeader *pallocxl);
// check if capacity is set
BOOL FSetCapacity() const
{
return ULLONG_MAX != m_ullCapacity;
}
// update reserved memory with allocation request
BOOL FReserveMem(ULONG ulAlloc);
// update reserved memory with release request
void UnreserveMem(ULONG ulAlloc);
// check if allocation is in a slab
static
BOOL FSlabAlloc
(
void *pv
)
{
// check the allocation-type flag (ULONG_PTR) stored right before the allocation
return (CSlab::EatSlabAlloc == *(static_cast<ULONG_PTR*>(pv) - 1));
}
#ifdef GPOS_DEBUG
// check if a particular allocation is sound for this memory pool
void CheckAllocation(void *pv);
#endif // GPOS_DEBUG
// private copy ctor
CMemoryPoolSlab(CMemoryPoolSlab &);
public:
// ctor
CMemoryPoolSlab
(
IMemoryPool *pmp,
ULLONG ullCapacity,
BOOL fThreadSafe,
BOOL fOwnsUnderlying
);
// dtor
virtual
~CMemoryPoolSlab();
// allocate memory
virtual
void *PvAllocate
(
const ULONG ulBytes,
const CHAR *szFile,
const ULONG ulLine
);
// free memory - memory is released when the memory pool is torn down
virtual
void Free(void *pv);
// return all used memory to the underlying pool and tear it down
virtual
void TearDown();
// check if the pool stores a pointer to itself at the end of
// the header of each allocated object;
virtual
BOOL FStoresPoolPointer() const
{
return true;
}
// return total allocated size
virtual
ULLONG UllTotalAllocatedSize() const
{
return (ULLONG) m_ulpReserved;
}
};
}
#endif // !GPOS_CMemoryPoolSlab_H
// EOF
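Both allocation paths above rely on one invariant: the machine word immediately before every pointer handed to a caller holds an allocation-type tag (CSlab::EatSlabAlloc or CSlab::EatXLAlloc), which is what lets Free() dispatch without knowing which path produced the pointer. A minimal standalone sketch of that tagging trick, with illustrative names rather than GPOS APIs:

#include <cstdint>
#include <cstdlib>
#include <iostream>

enum AllocType : std::uintptr_t { SlabAlloc, XLAlloc };

// Allocate 'bytes' plus one leading word that records the allocation type.
void *TaggedAlloc(std::size_t bytes, AllocType type)
{
    auto *raw = static_cast<std::uintptr_t *>(
        std::malloc(sizeof(std::uintptr_t) + bytes));
    raw[0] = type;   // tag sits directly before the user pointer
    return raw + 1;  // caller only ever sees the memory after the tag
}

// Dispatch on the tag stored just before the user pointer,
// mirroring CMemoryPoolSlab::FSlabAlloc.
void TaggedFree(void *pv)
{
    std::uintptr_t *tag = static_cast<std::uintptr_t *>(pv) - 1;
    if (SlabAlloc == *tag)
        std::cout << "return slot to its slab\n";
    else
        std::cout << "remove from XL hash table, free to underlying pool\n";
    std::free(tag);
}

int main()
{
    TaggedFree(TaggedAlloc(64, XLAlloc));
}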
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 Greenplum Inc.
//
// @filename:
// CSlab.h
//
// @doc:
// Memory slab, divided into equally sized slots that are used to
// allocate objects no larger than the slot size.
//
// @owner:
//
// @test:
//
//---------------------------------------------------------------------------
#ifndef GPOS_CSlab_H
#define GPOS_CSlab_H
#include "gpos/base.h"
#include "gpos/utils.h"
#include "gpos/common/CSyncList.h"
namespace gpos
{
// fwd declaration
class CSlabCache;
//---------------------------------------------------------------------------
// @class:
// CSlab
//
// @doc:
//
// Memory slab; contains an array of slots;
//
//---------------------------------------------------------------------------
class CSlab
{
// cache accesses internal functionality
friend class CSlabCache;
public:
// enum indicating allocation type
enum EAllocType
{
EatSlabAlloc,
EatXLAlloc
};
private:
//---------------------------------------------------------------------------
// @class:
// SSlotHeader
//
// @doc:
//
// Memory slot header; used for individual allocations;
//
//---------------------------------------------------------------------------
struct SSlotHeader
{
// pointer to owning slab
CSlab *m_pslabOwner;
// pointer to next free slot
SSlotHeader *m_pshNext;
// allocation type; set to slab allocation;
// use ULONG_PTR for alignment;
ULONG_PTR ulpAllocType;
};
// slab ID
const ULONG_PTR m_ulpId;
// number of slots
const ULONG m_ulSlots;
// slot size
const ULONG m_ulSlotSize;
// counter of free slots
ULONG m_ulSlotsFree;
// flag indicating if slab is in cache's stack of active slabs;
BOOL m_fActive;
// top of free slot stack
SSlotHeader *m_pshstackFree;
// pointer to owning cache
CSlabCache *m_pscOwner;
// spin lock - shared by all slabs of a cache
CSpinlockBase &m_slock;
// link for cache's list of active slabs
SLink m_linkActive;
// link for cache's list of allocated slabs
SLink m_linkAllocated;
// invalid slab ID
static
ULONG_PTR m_ulpIdInvalid;
// get slot by offset
SSlotHeader &GetSlot(ULONG ulOffset);
// push slot to stack
void Push
(
SSlotHeader *psh
)
{
GPOS_ASSERT(NULL != psh);
psh->m_pshNext = m_pshstackFree;
m_pshstackFree = psh;
}
// pop slot from stack
SSlotHeader *PshPop()
{
SSlotHeader *psh = m_pshstackFree;
if (NULL != m_pshstackFree)
{
m_pshstackFree = m_pshstackFree->m_pshNext;
}
return psh;
}
// mark slab as inactive
void SetInactive()
{
GPOS_ASSERT(m_fActive);
m_fActive = false;
}
// no copy ctor
CSlab(const CSlab &);
public:
// ctor
CSlab
(
CSlabCache *pscOwner,
ULONG ulSlots,
ULONG ulSlotSize,
CSpinlockBase &slock
);
// reserve slot
void *PvAllocate(BOOL fNewSlab);
// recycle slot
void Free(void *pvObj);
// check if slab is unused
BOOL FUnused() const
{
return m_ulSlots == m_ulSlotsFree;
}
// owner accessor
CSlabCache *Psc() const
{
return m_pscOwner;
}
// calculate slab size
static
ULONG UlSize(ULONG ulSlots, ULONG ulSlotSize);
// get slab cache from slot
static
CSlab *PslabConvert(void *pv);
#ifdef GPOS_DEBUG
BOOL FFull() const
{
return 0 == m_ulSlotsFree && NULL == m_pshstackFree;
}
#endif // GPOS_DEBUG
};
}
#endif // !GPOS_CSlab_H
// EOF
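The Push/PshPop pair above is an intrusive free-slot stack: free slots are threaded through a pointer stored in the slot header itself, so reserving or recycling a slot costs a couple of pointer writes and no auxiliary memory. The same structure in isolation (illustrative names, not GPOS code):

#include <cstdio>

struct Slot
{
    Slot *next;  // plays the role of SSlotHeader::m_pshNext
};

struct FreeStack
{
    Slot *top = nullptr;  // plays the role of m_pshstackFree

    void Push(Slot *s)
    {
        s->next = top;
        top = s;
    }

    Slot *Pop()
    {
        Slot *s = top;
        if (s != nullptr)
        {
            top = s->next;
        }
        return s;  // nullptr means the slab has no free slot left
    }
};

int main()
{
    Slot slots[4];
    FreeStack stack;
    for (Slot &s : slots)
    {
        stack.Push(&s);
    }
    while (Slot *s = stack.Pop())
    {
        std::printf("reserved slot %p\n", static_cast<void *>(s));
    }
}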
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 Greenplum Inc.
//
// @filename:
// CSlabCache.h
//
// @doc:
// Memory slab cache;
// creates and releases slabs, keeps track of which slabs are active
//
// @owner:
//
// @test:
//
//---------------------------------------------------------------------------
#ifndef GPOS_CSlabCache_H
#define GPOS_CSlabCache_H
#include "gpos/base.h"
#include "gpos/utils.h"
#include "gpos/common/CList.h"
#include "gpos/common/CSyncHashtable.h"
#include "gpos/common/CSyncHashtableAccessByKey.h"
#include "gpos/common/CSyncHashtableAccessByIter.h"
#include "gpos/common/CSyncHashtableIter.h"
#include "gpos/memory/CSlab.h"
namespace gpos
{
//---------------------------------------------------------------------------
// @class:
// CSlabCache
//
// @doc:
//
// Slab cache; manages slabs;
//
//---------------------------------------------------------------------------
class CSlabCache
{
private:
// type shorthands
typedef CList<CSlab> SlabList;
// underlying pool
IMemoryPool *m_pmp;
// list of slabs that are actively used for allocations
SlabList *m_pslablistActive;
// list of allocated slabs
SlabList *m_pslablistAllocated;
// spinlock for synchronization of all operations
CSpinlockOS m_slock;
// slot size
const ULONG m_ulSlotSize;
// slots per slab
const ULONG m_ulSlots;
// threshold where release of unused slabs starts (upper bound)
const ULONG m_ulThresholdHigh;
// threshold where release of unused slabs ends (lower bound)
const ULONG m_ulThresholdLow;
// unused slab counter
ULONG m_ulUnused;
// reserved memory - shared by all caches of a pool
volatile ULONG_PTR *m_pulpReserved;
// memory pool capacity
const ULLONG m_ullCapacity;
// flag indicating if cache is thread-safe
const BOOL m_fThreadSafe;
// flag indicating if slab cache has been torn down
BOOL m_fTornDown;
// initialize slab
CSlab *PslabCreate();
// release slab
void Release(CSlab *ps);
// release unused slabs
void Shrink(CAutoSpinlock &as);
// allocate memory for slab
void *PvAllocateSlab()
{
return m_pmp->PvAllocate
(
UlSlabSize(),
NULL /*szFile*/,
0 /*ulLine*/
);
}
// attempt to allocate slot in active slabs
void *PvAllocateSlot();
// get slab size
ULONG UlSlabSize() const
{
return CSlab::UlSize(m_ulSlots, m_ulSlotSize);
}
// reserve memory for new slab
BOOL FReserveMem();
// update reserved memory with release request
void UnreserveMem();
// no copy ctor
CSlabCache(const CSlabCache&);
public:
// ctor
CSlabCache
(
IMemoryPool *pmp,
ULONG ulChunkSize,
ULONG ulChunks,
ULONG ulThresholdHigh,
ULONG ulThresholdLow,
volatile ULONG_PTR *pulpReserved,
ULLONG ullCapacity,
BOOL fThreadSafe
);
// dtor
~CSlabCache();
// allocate object
void *PvAllocate();
// slot size accessor
ULONG UlSlotSize() const
{
return m_ulSlotSize;
}
// increment counter of unused slabs; may trigger shrinking
void IncUnused
(
CAutoSpinlock &as
)
{
GPOS_ASSERT_IMP(m_fThreadSafe, m_slock.FOwned());
if (++m_ulUnused > m_ulThresholdHigh)
{
Shrink(as);
}
}
// decrement counter of unused slabs
void DecrUnused()
{
GPOS_ASSERT_IMP(m_fThreadSafe, m_slock.FOwned());
m_ulUnused--;
}
// add slab to cache
void Add
(
CSlab *pslab
)
{
GPOS_ASSERT_IMP(m_fThreadSafe, m_slock.FOwned());
m_pslablistActive->Append(pslab);
}
// release all slabs
void Teardown();
// acquire spinlock
void SLock(CAutoSpinlock &as)
{
if (m_fThreadSafe)
{
as.Lock();
}
}
// release spinlock
void SUnlock(CAutoSpinlock &as)
{
if (m_fThreadSafe)
{
as.Unlock();
}
}
#ifdef GPOS_DEBUG
// check if slab belongs to cache
BOOL FOwns(CSlab *pslab);
// check if cache has no slab
BOOL FTornDown() const
{
return m_fTornDown;
}
#endif // GPOS_DEBUG
};
}
#endif // !GPOS_CSlabCache_H
// EOF
......@@ -112,7 +112,7 @@ COstreamStringTest::EresUnittest_OOM()
CAutoMemoryPool amp
(
CAutoMemoryPool::ElcExc,
- CMemoryPoolManager::EatSlab,
+ CMemoryPoolManager::EatTracker,
false /*fThreadSafe*/,
1024
);
......
......@@ -63,7 +63,6 @@ CMemoryPoolBasicTest::EresUnittest()
GPOS_UNITTEST_FUNC(CMemoryPoolBasicTest::EresUnittest_Print),
#endif // GPOS_DEBUG
GPOS_UNITTEST_FUNC(CMemoryPoolBasicTest::EresUnittest_TestTracker),
- GPOS_UNITTEST_FUNC(CMemoryPoolBasicTest::EresUnittest_TestSlab),
GPOS_UNITTEST_FUNC(CMemoryPoolBasicTest::EresUnittest_TestStack),
};
......@@ -117,21 +116,6 @@ CMemoryPoolBasicTest::EresUnittest_TestTracker()
}
- //---------------------------------------------------------------------------
- // @function:
- // CMemoryPoolBasicTest::EresUnittest_TestSlab
- //
- // @doc:
- // Run tests for pool using slab allocation
- //
- //---------------------------------------------------------------------------
- GPOS_RESULT
- CMemoryPoolBasicTest::EresUnittest_TestSlab()
- {
- return EresTestType(CMemoryPoolManager::EatSlab);
- }
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolBasicTest::EresUnittest_TestStack
......
......@@ -127,7 +127,7 @@ CWStringTest::EresUnittest_AppendFormat()
CAutoMemoryPool amp
(
CAutoMemoryPool::ElcExc,
- CMemoryPoolManager::EatSlab,
+ CMemoryPoolManager::EatTracker,
false /*fThreadSafe*/,
1024 * 1024 /*ullSizeMax*/
);
......
......@@ -21,7 +21,6 @@
#include "gpos/memory/CMemoryPoolAlloc.h"
#include "gpos/memory/CMemoryPoolInjectFault.h"
#include "gpos/memory/CMemoryPoolManager.h"
#include "gpos/memory/CMemoryPoolSlab.h"
#include "gpos/memory/CMemoryPoolStack.h"
#include "gpos/memory/CMemoryPoolTracker.h"
#include "gpos/memory/CMemoryVisitorPrint.h"
......@@ -208,15 +207,6 @@ CMemoryPoolManager::PmpNew
fOwnsUnderlying
);
- case CMemoryPoolManager::EatSlab:
- return GPOS_NEW(m_pmpInternal) CMemoryPoolSlab
- (
- pmpUnderlying,
- ullCapacity,
- fThreadSafe,
- fOwnsUnderlying
- );
case CMemoryPoolManager::EatStack:
return GPOS_NEW(m_pmpInternal) CMemoryPoolStack
(
......
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 EMC Corp.
//
// @filename:
// CMemoryPoolSlab.cpp
//
// @doc:
// Implementation of slab allocator.
//
// @owner:
//
// @test:
//
//---------------------------------------------------------------------------
#include "gpos/base.h"
#include "gpos/common/CAutoP.h"
#include "gpos/memory/CMemoryPoolManager.h"
#include "gpos/memory/CMemoryPoolSlab.h"
#define GPOS_MEM_SLAB_XL_HT_SIZE (128)
#define GPOS_MEM_SLAB_SLOT_SIZE_S (1024)
#define GPOS_MEM_SLAB_SLOT_SIZE_M (16 * 1024)
#define GPOS_MEM_SLAB_SLOT_SIZE_L (128 * 1024)
#define GPOS_MEM_SLAB_SLOTS_S (128)
#define GPOS_MEM_SLAB_SLOTS_M (16)
#define GPOS_MEM_SLAB_SLOTS_L (8)
#define GPOS_MEM_SLAB_SLOTS_XL (4)
#define GPOS_MEM_SLAB_THRES_HIGH_S (16)
#define GPOS_MEM_SLAB_THRES_HIGH_M (8)
#define GPOS_MEM_SLAB_THRES_HIGH_L (4)
#define GPOS_MEM_SLAB_THRES_HIGH_XL (4)
#define GPOS_MEM_SLAB_THRES_LOW_S (8)
#define GPOS_MEM_SLAB_THRES_LOW_M (4)
#define GPOS_MEM_SLAB_THRES_LOW_L (2)
#define GPOS_MEM_SLAB_THRES_LOW_XL (2)
using namespace gpos;
// initialization of static members
ULONG_PTR CMemoryPoolSlab::SAllocXLHeader::m_ulpIdInvalid(0);
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::CMemoryPoolSlab
//
// @doc:
// Ctor
//
//---------------------------------------------------------------------------
CMemoryPoolSlab::CMemoryPoolSlab
(
IMemoryPool *pmp,
ULLONG ullCapacity,
BOOL fThreadSafe,
BOOL fOwnsUnderlying
)
:
CMemoryPool(pmp, fOwnsUnderlying, fThreadSafe),
m_pshtAllocXL(NULL),
m_ulpReserved(0),
m_ullCapacity(ullCapacity)
{
GPOS_ASSERT(NULL != pmp);
// hash table of XL allocations
CAutoP<AllocXLHashTable> a_pshtChunksXL;
a_pshtChunksXL = GPOS_NEW(PmpUnderlying()) AllocXLHashTable();
a_pshtChunksXL.Pt()->Init
(
PmpUnderlying(),
GPOS_MEM_SLAB_XL_HT_SIZE,
GPOS_OFFSET(SAllocXLHeader, m_link),
GPOS_OFFSET(SAllocXLHeader, m_ulpId),
&SAllocXLHeader::m_ulpIdInvalid,
UlHashUlp,
FEqualUlp
);
InitCaches();
m_pshtAllocXL = a_pshtChunksXL.PtReset();
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::InitCaches
//
// @doc:
// Initialize slab caches
//
//---------------------------------------------------------------------------
void
CMemoryPoolSlab::InitCaches()
{
// guard against OOM during initialization
CAutoP<CSlabCache> a_rgsc[GPOS_ARRAY_SIZE(m_rgrgpsc)][GPOS_ARRAY_SIZE(m_rgrgpsc[0])];
for (ULONG ulGroup = 0; ulGroup < GPOS_ARRAY_SIZE(a_rgsc); ulGroup++)
{
ULONG ulChunkSize = ULONG(1) << (ulGroup + 5);
ULONG ulChunks = GPOS_MEM_SLAB_SLOTS_XL;
ULONG ulThresholdHigh = GPOS_MEM_SLAB_THRES_HIGH_XL;
ULONG ulThresholdLow = GPOS_MEM_SLAB_THRES_LOW_XL;
if (GPOS_MEM_SLAB_SLOT_SIZE_S > ulChunkSize)
{
ulChunks = GPOS_MEM_SLAB_SLOTS_S;
ulThresholdHigh = GPOS_MEM_SLAB_THRES_HIGH_S;
ulThresholdLow = GPOS_MEM_SLAB_THRES_LOW_S;
}
else if (GPOS_MEM_SLAB_SLOT_SIZE_M > ulChunkSize)
{
ulChunks = GPOS_MEM_SLAB_SLOTS_M;
ulThresholdHigh = GPOS_MEM_SLAB_THRES_HIGH_M;
ulThresholdLow = GPOS_MEM_SLAB_THRES_LOW_M;
}
else if (GPOS_MEM_SLAB_SLOT_SIZE_L > ulChunkSize)
{
ulChunks = GPOS_MEM_SLAB_SLOTS_L;
ulThresholdHigh = GPOS_MEM_SLAB_THRES_HIGH_L;
ulThresholdLow = GPOS_MEM_SLAB_THRES_LOW_L;
}
for (ULONG ulIdx = 0; ulIdx < GPOS_ARRAY_SIZE(a_rgsc[ulGroup]); ulIdx++)
{
a_rgsc[ulGroup][ulIdx] = GPOS_NEW(PmpUnderlying()) CSlabCache
(
PmpUnderlying(),
ulChunkSize,
ulChunks,
ulThresholdHigh,
ulThresholdLow,
&m_ulpReserved,
m_ullCapacity,
FThreadSafe()
);
}
}
for (ULONG ulGroup = 0; ulGroup < GPOS_ARRAY_SIZE(a_rgsc); ulGroup++)
{
for (ULONG ulIdx = 0; ulIdx < GPOS_ARRAY_SIZE(a_rgsc[ulGroup]); ulIdx++)
{
m_rgrgpsc[ulGroup][ulIdx] = a_rgsc[ulGroup][ulIdx].PtReset();
}
m_rgulpCacheIdx[ulGroup] = 0;
}
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::~CMemoryPoolSlab
//
// @doc:
// Dtor
//
//---------------------------------------------------------------------------
CMemoryPoolSlab::~CMemoryPoolSlab()
{}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::PvAllocate
//
// @doc:
// Allocate memory
//
//---------------------------------------------------------------------------
void *
CMemoryPoolSlab::PvAllocate
(
ULONG ulBytes,
const CHAR *szFile,
const ULONG ulLine
)
{
CSlabCache *psl = Psc(ulBytes);
if (NULL != psl)
{
// slab allocation
GPOS_ASSERT(ulBytes <= psl->UlSlotSize());
return psl->PvAllocate();
}
// XL allocation
return PvAllocateXL(ulBytes, szFile, ulLine);
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::Free
//
// @doc:
// Free memory.
//
//---------------------------------------------------------------------------
void
CMemoryPoolSlab::Free
(
void *pv
)
{
#ifdef GPOS_DEBUG
CheckAllocation(pv);
#endif // GPOS_DEBUG
if (FSlabAlloc(pv))
{
// slab allocation
CSlab *pslab = CSlab::PslabConvert(pv);
pslab->Free(pv);
}
else
{
// XL allocation
FreeXL(SAllocXLHeader::Pallocxl(pv));
}
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::TearDown
//
// @doc:
// Return all used memory to the underlying pool and tear it down
//
//---------------------------------------------------------------------------
void
CMemoryPoolSlab::TearDown()
{
// release slabs in caches
for (ULONG ulGroup = 0; ulGroup < GPOS_ARRAY_SIZE(m_rgrgpsc); ulGroup++)
{
for (ULONG ulIdx = 0; ulIdx < GPOS_ARRAY_SIZE(m_rgrgpsc[ulGroup]); ulIdx++)
{
m_rgrgpsc[ulGroup][ulIdx]->Teardown();
GPOS_DELETE(m_rgrgpsc[ulGroup][ulIdx]);
}
}
// release XL allocations;
// each header is unlinked while the iterator's accessor holds its bucket
// lock, but freed only on the next loop iteration, after the accessor
// has been destroyed and the lock dropped
SAllocXLHeader *pallocxl = NULL;
AllocXLIter shtit(*m_pshtAllocXL);
while (NULL != pallocxl || shtit.FAdvance())
{
if (NULL != pallocxl)
{
PmpUnderlying()->Free(pallocxl);
}
AllocXLIterAccessor shtitacc(shtit);
pallocxl = shtitacc.Pt();
if (NULL != pallocxl)
{
shtitacc.Remove(pallocxl);
}
}
// release hash table
GPOS_DELETE(m_pshtAllocXL);
CMemoryPool::TearDown();
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::Psc
//
// @doc:
// Find cache corresponding to allocation size
//
//---------------------------------------------------------------------------
CSlabCache *
CMemoryPoolSlab::Psc
(
ULONG ulAlloc
)
{
if (ulAlloc > m_rgrgpsc[GPOS_ARRAY_SIZE(m_rgrgpsc) - 1][0]->UlSlotSize())
{
// XL allocation, don't use slabs
return NULL;
}
ULONG ulLow = 0;
ULONG ulHigh = GPOS_ARRAY_SIZE(m_rgrgpsc) - 1;
ULONG ulGroup = ULONG_MAX;
// binary search on slot size per cache
while (ulLow + 1 < ulHigh)
{
const ULONG ulMid = (ulLow + ulHigh) / 2;
const ULONG ulMidChunkSize = m_rgrgpsc[ulMid][0]->UlSlotSize();
if (ulAlloc == ulMidChunkSize)
{
ulGroup = ulMid;
break;
}
if (ulAlloc > ulMidChunkSize)
{
ulLow = ulMid;
}
else
{
ulHigh = ulMid;
}
}
if (ULONG_MAX == ulGroup)
{
if (ulAlloc <= m_rgrgpsc[ulLow][0]->UlSlotSize())
{
ulGroup = ulLow;
}
else
{
GPOS_ASSERT(ulAlloc <= m_rgrgpsc[ulHigh][0]->UlSlotSize());
ulGroup = ulHigh;
}
}
ULONG ulIdx =
UlpExchangeAdd(&m_rgulpCacheIdx[ulGroup], 1) &
(GPOS_MEM_SLAB_CACHES_PER_GROUP - 1);
return m_rgrgpsc[ulGroup][ulIdx];
}
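The final step of Psc spreads callers over the caches of the chosen group: a per-group counter is atomically post-incremented and masked with GPOS_MEM_SLAB_CACHES_PER_GROUP - 1. The mask substitutes for a modulo, which is exactly why the per-group cache count must be a power of two. A compact sketch of the same idea, with std::atomic standing in for UlpExchangeAdd:

#include <atomic>
#include <cstdio>

// cache count per group; the (N - 1) mask below requires a power of 2
constexpr unsigned kCachesPerGroup = 1u << 2;

std::atomic<unsigned> cacheIdx{0};  // cf. m_rgulpCacheIdx[ulGroup]

unsigned NextCache()
{
    // fetch-and-add returns the old value; masking cycles 0..N-1
    return cacheIdx.fetch_add(1) & (kCachesPerGroup - 1);
}

int main()
{
    for (int i = 0; i < 8; i++)
    {
        std::printf("%u ", NextCache());  // prints 0 1 2 3 0 1 2 3
    }
    std::printf("\n");
}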
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::PvAllocateXL
//
// @doc:
// Allocate XL object
//
//---------------------------------------------------------------------------
void *
CMemoryPoolSlab::PvAllocateXL
(
const ULONG ulBytes,
const CHAR *szFile,
const ULONG ulLine
)
{
const ULONG ulAllocSize =
GPOS_MEM_ALIGNED_SIZE(GPOS_SIZEOF(SAllocXLHeader)) +
GPOS_MEM_ALIGNED_SIZE(ulBytes);
if (FSetCapacity() && !FReserveMem(ulAllocSize))
{
return NULL;
}
void *pvAlloc = PmpUnderlying()->PvAllocate(ulAllocSize, szFile, ulLine);
if (NULL == pvAlloc)
{
if (FSetCapacity())
{
UnreserveMem(ulAllocSize);
}
return NULL;
}
SAllocXLHeader *pallocxl = static_cast<SAllocXLHeader*>(pvAlloc);
pallocxl->m_ulpId = (ULONG_PTR) pallocxl;
pallocxl->m_ulAllocSize = ulAllocSize;
pallocxl->ulpSlabAlloc = CSlab::EatXLAlloc;
// scope for accessor
{
AllocXLKeyAccessor shtacc(*m_pshtAllocXL, pallocxl->m_ulpId);
shtacc.Insert(pallocxl);
}
// return pointer after header (allocation body)
return pallocxl + 1;
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::FreeXL
//
// @doc:
// Release XL object
//
//---------------------------------------------------------------------------
void
CMemoryPoolSlab::FreeXL
(
SAllocXLHeader *pallocxl
)
{
if (FSetCapacity())
{
UnreserveMem(pallocxl->m_ulAllocSize);
}
// scope for accessor
{
AllocXLKeyAccessor shtacc(*m_pshtAllocXL, pallocxl->m_ulpId);
shtacc.Remove(pallocxl);
}
PmpUnderlying()->Free(pallocxl);
}
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::FReserveMem
//
// @doc:
// Update reserved memory with allocation request
//
//---------------------------------------------------------------------------
BOOL
CMemoryPoolSlab::FReserveMem
(
ULONG ulAlloc
)
{
if (m_ulpReserved + ulAlloc <= m_ullCapacity)
{
ULONG_PTR ulpOld = UlpExchangeAdd(&m_ulpReserved, ulAlloc);
if (ulpOld + ulAlloc <= m_ullCapacity)
{
return true;
}
UnreserveMem(ulAlloc);
}
return false;
}
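FReserveMem is an optimistic reservation protocol: an unlocked pre-check, an atomic fetch-and-add, then a re-check of the post-add total with a rollback if a concurrent reservation pushed the pool past capacity. A self-contained sketch of the same protocol, with std::atomic standing in for the volatile ULONG_PTR and UlpExchangeAdd:

#include <atomic>
#include <cstdint>
#include <cstdio>

std::atomic<std::uint64_t> reserved{0};    // cf. m_ulpReserved
constexpr std::uint64_t kCapacity = 1024;  // cf. m_ullCapacity

bool Reserve(std::uint64_t bytes)
{
    if (reserved.load() + bytes <= kCapacity)  // cheap unlocked pre-check
    {
        std::uint64_t old = reserved.fetch_add(bytes);
        if (old + bytes <= kCapacity)
        {
            return true;               // reservation stands
        }
        reserved.fetch_sub(bytes);     // lost the race: roll back
    }
    return false;
}

int main()
{
    std::printf("%d %d\n", Reserve(1000), Reserve(100));  // prints: 1 0
}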
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::UnreserveMem
//
// @doc:
// Update reserved memory with release request
//
//---------------------------------------------------------------------------
void
CMemoryPoolSlab::UnreserveMem
(
ULONG ulAlloc
)
{
(void) UlpExchangeAdd(&m_ulpReserved, -ulAlloc);
}
#ifdef GPOS_DEBUG
//---------------------------------------------------------------------------
// @function:
// CMemoryPoolSlab::CheckAllocation
//
// @doc:
// Verify that an allocation is correct and came from this pool
//
//---------------------------------------------------------------------------
void
CMemoryPoolSlab::CheckAllocation
(
void *pv
)
{
if (FSlabAlloc(pv))
{
CSlab *pslab = CSlab::PslabConvert(pv);
BOOL fFound = false;
for (ULONG ulGroup = 0; ulGroup < GPOS_ARRAY_SIZE(m_rgrgpsc); ulGroup++)
{
for (ULONG ulIdx = 0; ulIdx < GPOS_ARRAY_SIZE(m_rgrgpsc[ulGroup]); ulIdx++)
{
if (pslab->Psc() == m_rgrgpsc[ulGroup][ulIdx])
{
GPOS_ASSERT_IMP(m_rgrgpsc[ulGroup][ulIdx]->FOwns(pslab), !fFound);
fFound = true;
}
}
}
GPOS_ASSERT(fFound && "Allocation does not belong to any cache");
}
else
{
SAllocXLHeader *pallocxl = static_cast<SAllocXLHeader*>(pv) - 1;
AllocXLKeyAccessor shtacc(*m_pshtAllocXL, pallocxl->m_ulpId);
GPOS_ASSERT(NULL != shtacc.PtLookup());
}
}
#endif // GPOS_DEBUG
// EOF
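TearDown above drains the XL hash table with a deferred-free pattern: each header is removed while the accessor holds its bucket lock, but its memory is released only on the next trip around the loop, once the accessor is destroyed. A sketch of that pattern, with std::list and std::mutex standing in for the GPOS sync hash table:

#include <cstdio>
#include <list>
#include <mutex>

std::list<int *> table;  // stands in for the XL hash table
std::mutex tableLock;    // stands in for the per-bucket lock

void DrainAndFree()
{
    int *prev = nullptr;
    for (;;)
    {
        if (prev != nullptr)
        {
            delete prev;  // freed outside the lock, one iteration late
            prev = nullptr;
        }
        std::lock_guard<std::mutex> guard(tableLock);
        if (table.empty())
        {
            break;
        }
        prev = table.front();
        table.pop_front();  // unlinked under the lock
    }
}

int main()
{
    for (int i = 0; i < 3; i++)
    {
        table.push_back(new int(i));
    }
    DrainAndFree();
    std::printf("drained\n");
}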
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 EMC Corp.
//
// @filename:
// CSlab.cpp
//
// @doc:
// Implementation of memory slab
//
// @owner:
//
// @test:
//
//---------------------------------------------------------------------------
#include "gpos/base.h"
#include "gpos/memory/CMemoryPool.h"
#include "gpos/memory/CSlab.h"
#include "gpos/memory/CSlabCache.h"
#include "gpos/sync/atomic.h"
#define GPOS_MEM_SLAB_REACTIVATE_PERCENT (50)
using namespace gpos;
// initialization of static variables
ULONG_PTR CSlab::m_ulpIdInvalid(0);
//---------------------------------------------------------------------------
// @function:
// CSlab::CSlab
//
// @doc:
// Ctor
//
//---------------------------------------------------------------------------
CSlab::CSlab
(
CSlabCache *pscOwner,
ULONG ulChunks,
ULONG ulChunkSize,
CSpinlockBase &slock
)
:
m_ulpId((ULONG_PTR) this),
m_ulSlots(ulChunks),
m_ulSlotSize(ulChunkSize),
m_ulSlotsFree(ulChunks),
m_fActive(true),
m_pshstackFree(NULL),
m_pscOwner(pscOwner),
m_slock(slock)
{
GPOS_ASSERT(NULL != pscOwner);
GPOS_ASSERT(MAX_ALIGNED(this));
GPOS_ASSERT(MAX_ALIGNED(m_ulSlotSize));
GPOS_ASSERT(0 < ulChunks);
GPOS_ASSERT(0 < ulChunkSize);
// initialize slots
for (ULONG ul = 0; ul < m_ulSlots; ul++)
{
SSlotHeader &slot(GetSlot(ul));
slot.m_pslabOwner = this;
slot.ulpAllocType = EatSlabAlloc;
slot.m_pshNext = NULL;
Push(&slot);
}
}
//---------------------------------------------------------------------------
// @function:
// CSlab::PvAllocate
//
// @doc:
// Reserve slot
//
//---------------------------------------------------------------------------
void *
CSlab::PvAllocate
(
BOOL fNewSlab
)
{
SSlotHeader *psh = PshPop();
GPOS_ASSERT(MAX_ALIGNED(psh));
// check if all slots in slab are used
if (NULL == psh)
{
GPOS_ASSERT(0 == m_ulSlotsFree);
return NULL;
}
GPOS_ASSERT(0 < m_ulSlotsFree);
if (!fNewSlab && m_ulSlotsFree == m_ulSlots)
{
// slab was not used before;
// decrement unused slab counter in cache;
m_pscOwner->DecrUnused();
}
m_ulSlotsFree--;
// return position after slot header
return psh + 1;
}
//---------------------------------------------------------------------------
// @function:
// CSlab::Free
//
// @doc:
// Recycle slot
//
//---------------------------------------------------------------------------
void
CSlab::Free
(
void *pvObj
)
{
GPOS_ASSERT(!m_slock.FOwned());
// slot header precedes user data
SSlotHeader *psh = static_cast<SSlotHeader*>(pvObj) - 1;
GPOS_ASSERT(EatSlabAlloc == psh->ulpAllocType);
CAutoSpinlock as(m_slock);
m_pscOwner->SLock(as);
// recycle slot
Push(psh);
m_ulSlotsFree++;
// check if the slab was inactive and can now be re-activated
if (!m_fActive &&
m_ulSlotsFree > (m_ulSlots * GPOS_MEM_SLAB_REACTIVATE_PERCENT) / 100)
{
// set this to active
m_fActive = true;
m_pscOwner->Add(psh->m_pslabOwner);
}
// check if this was the last used slot in the slab
if (m_ulSlotsFree == m_ulSlots)
{
// increment unused slab counter in cache
m_pscOwner->IncUnused(as);
}
}
//---------------------------------------------------------------------------
// @function:
// CSlab::GetSlot
//
// @doc:
// Get slot by offset
//
//---------------------------------------------------------------------------
CSlab::SSlotHeader &
CSlab::GetSlot
(
ULONG ulOffset
)
{
BYTE *pbSlotPosition =
reinterpret_cast<BYTE*>(this + 1) +
ulOffset * (GPOS_MEM_ALIGNED_SIZE(GPOS_SIZEOF(SSlotHeader)) + m_ulSlotSize);
SSlotHeader &slot = *(reinterpret_cast<SSlotHeader*>(pbSlotPosition));
GPOS_ASSERT(MAX_ALIGNED(&slot));
return slot;
}
//---------------------------------------------------------------------------
// @function:
// CSlab::UlSize
//
// @doc:
// Calculate slab size
//
//---------------------------------------------------------------------------
ULONG
CSlab::UlSize
(
ULONG ulSlots,
ULONG ulSlotSize
)
{
GPOS_ASSERT(MAX_ALIGNED(ulSlotSize));
const ULONG ulHeader = GPOS_MEM_ALIGNED_SIZE(GPOS_SIZEOF(CSlab));
const ULONG ulTotalSlotSize =
ulSlots * (GPOS_MEM_ALIGNED_SIZE(GPOS_SIZEOF(SSlotHeader)) + ulSlotSize);
const ULONG ulTotalSize = ulHeader + ulTotalSlotSize;
GPOS_ASSERT(MAX_ALIGNED(ulTotalSize));
return ulTotalSize;
}
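UlSize lays a slab out as one aligned CSlab header followed by ulSlots pairs of aligned slot header plus slot payload. A worked example of that arithmetic under assumed header sizes; the real numbers depend on the platform and on GPOS_MEM_ALIGNED_SIZE:

#include <cstdio>

// assumed, already-aligned header sizes (illustrative only)
constexpr unsigned kSlabHeader = 64;  // GPOS_MEM_ALIGNED_SIZE(sizeof(CSlab))
constexpr unsigned kSlotHeader = 24;  // GPOS_MEM_ALIGNED_SIZE(sizeof(SSlotHeader))

unsigned SlabSize(unsigned slots, unsigned slotSize)
{
    return kSlabHeader + slots * (kSlotHeader + slotSize);
}

int main()
{
    // smallest cache group: 128 slots of 32 bytes each
    std::printf("%u bytes\n", SlabSize(128, 32));  // 64 + 128 * 56 = 7232
}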
//---------------------------------------------------------------------------
// @function:
// CSlab::PslabConvert
//
// @doc:
// Get slab cache from slot
//
//---------------------------------------------------------------------------
CSlab *
CSlab::PslabConvert(void *pv)
{
// slot header precedes user data
SSlotHeader *psh = static_cast<SSlotHeader*>(pv) - 1;
CSlab *pslab = psh->m_pslabOwner;
GPOS_ASSERT
(
pv >= (pslab + 1) &&
pv < reinterpret_cast<BYTE*>(pslab) + UlSize(pslab->m_ulSlots, pslab->m_ulSlotSize)
);
return pslab;
}
// EOF
//---------------------------------------------------------------------------
// Greenplum Database
// Copyright (C) 2012 EMC Corp.
//
// @filename:
// CSlabCache.cpp
//
// @doc:
// Implementation of memory slab cache
//
// @owner:
//
// @test:
//
//---------------------------------------------------------------------------
#include "gpos/base.h"
#include "gpos/common/CAutoP.h"
#include "gpos/memory/CMemoryPool.h"
#include "gpos/memory/CSlabCache.h"
#include "gpos/sync/atomic.h"
#include "gpos/task/CAutoSuspendAbort.h"
#define GPOS_MEM_SLAB_CACHE_HT_SIZE (128)
using namespace gpos;
//---------------------------------------------------------------------------
// @function:
// CSlabCache::CSlabCache
//
// @doc:
// Ctor
//
//---------------------------------------------------------------------------
CSlabCache::CSlabCache
(
IMemoryPool *pmp,
ULONG ulChunkSize,
ULONG ulChunks,
ULONG ulThresholdHigh,
ULONG ulThresholdLow,
volatile ULONG_PTR *pulpReserved,
ULLONG ullCapacity,
BOOL fThreadSafe
)
:
m_pmp(pmp),
m_pslablistActive(NULL),
m_pslablistAllocated(NULL),
m_ulSlotSize(ulChunkSize),
m_ulSlots(ulChunks),
m_ulThresholdHigh(ulThresholdHigh),
m_ulThresholdLow(ulThresholdLow),
m_ulUnused(0),
m_pulpReserved(pulpReserved),
m_ullCapacity(ullCapacity),
m_fThreadSafe(fThreadSafe),
m_fTornDown(false)
{
GPOS_ASSERT(NULL != m_pmp);
GPOS_ASSERT(NULL != m_pulpReserved);
GPOS_ASSERT(0 < ulChunkSize);
GPOS_ASSERT(0 < ulChunks);
GPOS_ASSERT(0 < ulThresholdLow);
GPOS_ASSERT(ulThresholdLow < ulThresholdHigh);
// guard against OOM during initialization
CAutoP<SlabList> a_pslablistActive;
CAutoP<SlabList> a_pslablistAllocated;
a_pslablistActive = GPOS_NEW(m_pmp) SlabList();
a_pslablistAllocated = GPOS_NEW(m_pmp) SlabList();
a_pslablistActive.Pt()->Init(GPOS_OFFSET(CSlab, m_linkActive));
a_pslablistAllocated.Pt()->Init(GPOS_OFFSET(CSlab, m_linkAllocated));
m_pslablistActive = a_pslablistActive.PtReset();
m_pslablistAllocated = a_pslablistAllocated.PtReset();
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::~CSlabCache
//
// @doc:
// Dtor
//
//---------------------------------------------------------------------------
CSlabCache::~CSlabCache()
{
Teardown();
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::PvAllocate
//
// @doc:
// Allocate object
//
//---------------------------------------------------------------------------
void *
CSlabCache::PvAllocate()
{
GPOS_ASSERT(NULL != m_pmp && "cache has not been initialized");
GPOS_ASSERT(!m_slock.FOwned());
CAutoSpinlock as(m_slock);
SLock(as);
void *pv = PvAllocateSlot();
if (NULL != pv)
{
return pv;
}
GPOS_ASSERT(m_pslablistActive->FEmpty());
// release spinlock to allocate memory for new slab
SUnlock(as);
// reserve memory for new slab
if (!FReserveMem())
{
return NULL;
}
CSlab *pslab = PslabCreate();
if (NULL == pslab)
{
// return reserved memory
UnreserveMem();
return NULL;
}
SLock(as);
m_pslablistAllocated->Append(pslab);
m_pslablistActive->Prepend(pslab);
void *pvAlloc = pslab->PvAllocate(true /*fNewSlab*/);
GPOS_ASSERT(NULL != pvAlloc);
return pvAlloc;
}
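PvAllocate holds the spinlock only for the fast path: it deliberately unlocks before reserving memory and building a new slab, then relocks to publish the slab, so no caller spins while another thread performs a slow allocation in the underlying pool. A sketch of that lock-drop pattern, with std::mutex and a stub cache standing in for the GPOS types:

#include <cstdio>
#include <mutex>

struct StubCache
{
    bool hasFreeSlot = false;

    void *TryFastPath() { return hasFreeSlot ? this : nullptr; }
    void *AllocateSlab() { return this; }  // pretend-slow allocation
    void *Publish(void *slab) { hasFreeSlot = true; return slab; }
};

void *Allocate(StubCache &cache, std::mutex &m)
{
    {
        std::lock_guard<std::mutex> guard(m);
        if (void *pv = cache.TryFastPath())
        {
            return pv;
        }
    }  // lock released: never hold it across the slow allocation

    void *slab = cache.AllocateSlab();
    if (slab == nullptr)
    {
        return nullptr;
    }
    std::lock_guard<std::mutex> guard(m);  // retake only to publish
    return cache.Publish(slab);
}

int main()
{
    StubCache cache;
    std::mutex m;
    std::printf("%p\n", Allocate(cache, m));
}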
//---------------------------------------------------------------------------
// @function:
// CSlabCache::PvAllocateSlot
//
// @doc:
// Attempt to allocate slot in active slabs
//
//---------------------------------------------------------------------------
void *
CSlabCache::PvAllocateSlot()
{
GPOS_ASSERT_IMP(m_fThreadSafe, m_slock.FOwned());
CSlab *pslab = m_pslablistActive->PtFirst();
while (NULL != pslab)
{
// check if the slab has an available slot
void *pvAlloc = pslab->PvAllocate(false /*fNewSlab*/);
if (NULL != pvAlloc)
{
return pvAlloc;
}
// pop slab from cache's list
CSlab *pslabPopped = m_pslablistActive->RemoveHead();
GPOS_ASSERT(pslabPopped == pslab);
GPOS_ASSERT(pslabPopped->FFull());
pslabPopped->SetInactive();
pslab = m_pslablistActive->PtFirst();
}
GPOS_ASSERT(m_pslablistActive->FEmpty());
return NULL;
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::Teardown
//
// @doc:
// Release all slabs
//
//---------------------------------------------------------------------------
void
CSlabCache::Teardown()
{
GPOS_ASSERT(!m_slock.FOwned());
if (!m_fTornDown)
{
// release all slabs
while (!m_pslablistAllocated->FEmpty())
{
CSlab *pslab = m_pslablistAllocated->RemoveHead();
Release(pslab);
}
// release slab lists
GPOS_DELETE(m_pslablistAllocated);
GPOS_DELETE(m_pslablistActive);
m_fTornDown = true;
}
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::PslabCreate
//
// @doc:
// Initialize slab
//
//---------------------------------------------------------------------------
CSlab *
CSlabCache::PslabCreate()
{
GPOS_ASSERT(NULL != m_pmp && "cache has not been initialized");
GPOS_ASSERT(!m_slock.FOwned());
// allocate memory for slab
void *pvSlabAlloc = PvAllocateSlab();
if (NULL == pvSlabAlloc)
{
return NULL;
}
// create new slab using placement new
return new(pvSlabAlloc) CSlab(this, m_ulSlots, m_ulSlotSize, m_slock);
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::Release
//
// @doc:
// Release slab
//
//---------------------------------------------------------------------------
void
CSlabCache::Release
(
CSlab *pslab
)
{
GPOS_ASSERT(NULL != m_pmp && "cache has not been initialized");
GPOS_ASSERT(!m_slock.FOwned());
m_pmp->Free(pslab);
UnreserveMem();
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::Shrink
//
// @doc:
// Release unused slabs
//
//---------------------------------------------------------------------------
void
CSlabCache::Shrink
(
CAutoSpinlock &as
)
{
GPOS_ASSERT(NULL != m_pmp && "cache has not been initialized");
GPOS_ASSERT(m_ulThresholdHigh < m_pslablistActive->UlSize());
GPOS_ASSERT_IMP(m_fThreadSafe, m_slock.FOwned());
// store empty slabs to separate list;
// we need to release the spinlock before returning slabs
// to underlying memory pool;
SlabList slablistEmpty;
slablistEmpty.Init(GPOS_OFFSET(CSlab, m_linkActive));
CSlab *pslab = m_pslablistActive->PtFirst();
while (NULL != pslab)
{
// release unused slabs until their number drops below lower threshold
if (pslab->FUnused() && m_ulThresholdLow <= m_ulUnused)
{
DecrUnused();
CSlab *pslabRemove = pslab;
pslab = m_pslablistActive->PtNext(pslab);
m_pslablistActive->Remove(pslabRemove);
m_pslablistAllocated->Remove(pslabRemove);
slablistEmpty.Append(pslabRemove);
}
else
{
pslab = m_pslablistActive->PtNext(pslab);
}
}
// release spinlock to free slabs
SUnlock(as);
while (!slablistEmpty.FEmpty())
{
Release(slablistEmpty.RemoveHead());
}
}
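IncUnused and Shrink together implement a high/low watermark policy: shrinking begins only once the unused-slab count exceeds the high threshold, and then releases slabs until the count drops past the low one. The gap between the thresholds is hysteresis that stops a workload hovering near a single boundary from freeing and re-allocating a slab on every request. A toy model of the policy:

#include <cstdio>

constexpr unsigned kHigh = 16;  // cf. GPOS_MEM_SLAB_THRES_HIGH_S
constexpr unsigned kLow = 8;    // cf. GPOS_MEM_SLAB_THRES_LOW_S

unsigned unused = 0;  // unused-slab counter, cf. m_ulUnused

// Counterpart of IncUnused: crossing the high watermark triggers a
// shrink that releases slabs down past the low watermark.
void IncUnused()
{
    if (++unused > kHigh)
    {
        while (unused >= kLow)  // Shrink()
        {
            unused--;           // release one unused slab
        }
        std::printf("shrunk to %u unused slabs\n", unused);
    }
}

int main()
{
    for (int i = 0; i < 40; i++)
    {
        IncUnused();
    }
}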
//---------------------------------------------------------------------------
// @function:
// CSlabCache::FReserveMem
//
// @doc:
// Reserve memory for new slab
//
//---------------------------------------------------------------------------
BOOL
CSlabCache::FReserveMem()
{
// check if capacity has been set
if (ULLONG_MAX == m_ullCapacity)
{
return true;
}
ULONG ulAlloc = UlSlabSize();
if (*m_pulpReserved + ulAlloc <= m_ullCapacity)
{
ULONG_PTR ulpOld = UlpExchangeAdd(m_pulpReserved, ulAlloc);
if (ulpOld + ulAlloc <= m_ullCapacity)
{
return true;
}
UnreserveMem();
}
return false;
}
//---------------------------------------------------------------------------
// @function:
// CSlabCache::UnreserveMem
//
// @doc:
// Update reserved memory with release request
//
//---------------------------------------------------------------------------
void
CSlabCache::UnreserveMem()
{
if (ULLONG_MAX > m_ullCapacity)
{
ULONG ulAlloc = UlSlabSize();
(void) UlpExchangeAdd(m_pulpReserved, -ulAlloc);
}
}
#ifdef GPOS_DEBUG
//---------------------------------------------------------------------------
// @function:
// CSlabCache::FOwns
//
// @doc:
// Check if slab belongs to cache
//
//---------------------------------------------------------------------------
BOOL
CSlabCache::FOwns
(
CSlab *pslab
)
{
CAutoSpinlock as(m_slock);
SLock(as);
return (GPOS_OK == m_pslablistAllocated->EresFind(pslab));
}
#endif // GPOS_DEBUG
// EOF