Unverified commit 36e3cef1, authored by Bruce Forstall, committed via GitHub

Fix some memory attributions (#51065)

* Fix some memory attributions

This doesn't change the memory stats greatly, overall.

* Formatting
Parent commit: 49369027
......@@ -541,12 +541,14 @@ void Compiler::optAssertionInit(bool isLocalProp)
if (!isLocalProp)
{
optValueNumToAsserts = new (getAllocator()) ValueNumToAssertsMap(getAllocator());
optValueNumToAsserts =
new (getAllocator(CMK_AssertionProp)) ValueNumToAssertsMap(getAllocator(CMK_AssertionProp));
}
if (optAssertionDep == nullptr)
{
optAssertionDep = new (this, CMK_AssertionProp) JitExpandArray<ASSERT_TP>(getAllocator(), max(1, lvaCount));
optAssertionDep =
new (this, CMK_AssertionProp) JitExpandArray<ASSERT_TP>(getAllocator(CMK_AssertionProp), max(1, lvaCount));
}
optAssertionTraitsInit(optMaxAssertionCount);
......
......@@ -47,6 +47,7 @@ CompMemKindMacro(DebugInfo)
CompMemKindMacro(DebugOnly)
CompMemKindMacro(Codegen)
CompMemKindMacro(LoopOpt)
CompMemKindMacro(LoopClone)
CompMemKindMacro(LoopHoist)
CompMemKindMacro(Unknown)
CompMemKindMacro(RangeCheck)
......
......@@ -1541,7 +1541,7 @@ bool LIR::Range::CheckLIR(Compiler* compiler, bool checkUnusedValues) const
slowNode = slowNode->gtNext;
}
SmallHashTable<GenTree*, bool, 32> unusedDefs(compiler->getAllocator());
SmallHashTable<GenTree*, bool, 32> unusedDefs(compiler->getAllocatorDebugOnly());
bool pastPhis = false;
GenTree* prev = nullptr;
......
......@@ -215,7 +215,7 @@ bool Compiler::optCSE_canSwap(GenTree* op1, GenTree* op2)
// If we haven't setup cseMaskTraits, do it now
if (cseMaskTraits == nullptr)
{
cseMaskTraits = new (getAllocator()) BitVecTraits(optCSECandidateCount, this);
cseMaskTraits = new (getAllocator(CMK_CSE)) BitVecTraits(optCSECandidateCount, this);
}
optCSE_MaskData op1MaskData;
......@@ -967,7 +967,7 @@ void Compiler::optCseUpdateCheckedBoundMap(GenTree* compare)
if (optCseCheckedBoundMap == nullptr)
{
// Allocate map on first use.
optCseCheckedBoundMap = new (getAllocator()) NodeToNodeMap(getAllocator());
optCseCheckedBoundMap = new (getAllocator(CMK_CSE)) NodeToNodeMap(getAllocator());
}
optCseCheckedBoundMap->Set(bound, compare);
......@@ -997,7 +997,7 @@ void Compiler::optValnumCSE_InitDataFlow()
const unsigned bitCount = (optCSECandidateCount * 2) + 1;
// Init traits and cseCallKillsMask bitvectors.
cseLivenessTraits = new (getAllocator()) BitVecTraits(bitCount, this);
cseLivenessTraits = new (getAllocator(CMK_CSE)) BitVecTraits(bitCount, this);
cseCallKillsMask = BitVecOps::MakeEmpty(cseLivenessTraits);
for (unsigned inx = 0; inx < optCSECandidateCount; inx++)
{
......
......@@ -1805,7 +1805,7 @@ private:
// Seed the loop block set and worklist with the entry block.
loopBlocks.Reset(entry->bbNum);
jitstd::list<BasicBlock*> worklist(comp->getAllocator());
jitstd::list<BasicBlock*> worklist(comp->getAllocator(CMK_LoopOpt));
worklist.push_back(entry);
while (!worklist.empty())
......@@ -3781,7 +3781,7 @@ void Compiler::optUnrollLoops()
/* Create the unrolled loop statement list */
{
BlockToBlockMap blockMap(getAllocator());
BlockToBlockMap blockMap(getAllocator(CMK_LoopOpt));
BasicBlock* insertAfter = bottom;
for (lval = lbeg; totalIter; totalIter--)
......@@ -4725,7 +4725,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext
}
else if (loop->lpFlags & LPFLG_ARRLEN_LIMIT)
{
ArrIndex* index = new (getAllocator()) ArrIndex(getAllocator());
ArrIndex* index = new (getAllocator(CMK_LoopClone)) ArrIndex(getAllocator(CMK_LoopClone));
if (!loop->lpArrLenLimit(this, index))
{
JITDUMP("> ArrLen not matching");
......@@ -4768,8 +4768,8 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext
// limit <= mdArrLen
LcMdArrayOptInfo* mdArrInfo = optInfo->AsLcMdArrayOptInfo();
LC_Condition cond(GT_LE, LC_Expr(ident),
LC_Expr(LC_Ident(LC_Array(LC_Array::MdArray,
mdArrInfo->GetArrIndexForDim(getAllocator()),
LC_Expr(LC_Ident(LC_Array(LC_Array::MdArray, mdArrInfo->GetArrIndexForDim(
getAllocator(CMK_LoopClone)),
mdArrInfo->dim, LC_Array::None))));
context->EnsureConditions(loopNum)->Push(cond);
}
......@@ -4895,7 +4895,7 @@ bool Compiler::optDeriveLoopCloningConditions(unsigned loopNum, LoopCloneContext
//
bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* context)
{
JitExpandArrayStack<LC_Deref*> nodes(getAllocator());
JitExpandArrayStack<LC_Deref*> nodes(getAllocator(CMK_LoopClone));
int maxRank = -1;
// Get the dereference-able arrays.
......@@ -4913,7 +4913,7 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
LC_Deref* node = LC_Deref::Find(&nodes, array.arrIndex->arrLcl);
if (node == nullptr)
{
node = new (getAllocator()) LC_Deref(array, 0 /*level*/);
node = new (getAllocator(CMK_LoopClone)) LC_Deref(array, 0 /*level*/);
nodes.Push(node);
}
......@@ -4922,11 +4922,11 @@ bool Compiler::optComputeDerefConditions(unsigned loopNum, LoopCloneContext* con
unsigned rank = (unsigned)array.GetDimRank();
for (unsigned i = 0; i < rank; ++i)
{
node->EnsureChildren(getAllocator());
node->EnsureChildren(getAllocator(CMK_LoopClone));
LC_Deref* tmp = node->Find(array.arrIndex->indLcls[i]);
if (tmp == nullptr)
{
tmp = new (getAllocator()) LC_Deref(array, node->level + 1);
tmp = new (getAllocator(CMK_LoopClone)) LC_Deref(array, node->level + 1);
node->children->Push(tmp);
}
......@@ -5269,7 +5269,7 @@ void Compiler::optCloneLoops()
unsigned optStaticallyOptimizedLoops = 0;
LoopCloneContext context(optLoopCount, getAllocator());
LoopCloneContext context(optLoopCount, getAllocator(CMK_LoopClone));
// Obtain array optimization candidates in the context.
optObtainLoopCloningOpts(&context);
......@@ -5477,7 +5477,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context)
BasicBlock* newFirst = nullptr;
BasicBlock* newBot = nullptr;
BlockToBlockMap* blockMap = new (getAllocator()) BlockToBlockMap(getAllocator());
BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
for (BasicBlock* blk = loop.lpFirst; blk != loop.lpBottom->bbNext; blk = blk->bbNext)
{
BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true);
......@@ -5692,7 +5692,7 @@ void Compiler::optEnsureUniqueHead(unsigned loopInd, BasicBlock::weight_t ambien
BlockSetOps::Assign(this, h2->bbReach, e->bbReach);
// Redirect paths from preds of "e" to go to "h2" instead of "e".
BlockToBlockMap* blockMap = new (getAllocator()) BlockToBlockMap(getAllocator());
BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone));
blockMap->Set(e, h2);
for (flowList* predEntry = e->bbPreds; predEntry; predEntry = predEntry->flNext)
......@@ -8820,7 +8820,7 @@ bool Compiler::optIsStackLocalInvariant(unsigned loopNum, unsigned lclNum)
//
Compiler::fgWalkResult Compiler::optCanOptimizeByLoopCloning(GenTree* tree, LoopCloneVisitorInfo* info)
{
ArrIndex arrIndex(getAllocator());
ArrIndex arrIndex(getAllocator(CMK_LoopClone));
// Check if array index can be optimized.
if (optReconstructArrIndex(tree, &arrIndex, BAD_VAR_NUM))
......
......@@ -178,7 +178,7 @@ void Compiler::unwindBegPrologCFI()
unwindGetFuncLocations(func, false, &func->coldStartLoc, &func->coldEndLoc);
}
func->cfiCodes = new (getAllocator()) CFICodeVector(getAllocator());
func->cfiCodes = new (getAllocator(CMK_UnwindInfo)) CFICodeVector(getAllocator());
#endif // FEATURE_EH_FUNCLETS
}
......
......@@ -5847,8 +5847,8 @@ struct ValueNumberState
}
ValueNumberState(Compiler* comp)
: m_toDoAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
, m_toDoNotAllPredsDone(comp->getAllocator(), /*minSize*/ 4)
: m_toDoAllPredsDone(comp->getAllocator(CMK_ValueNumber), /*minSize*/ 4)
, m_toDoNotAllPredsDone(comp->getAllocator(CMK_ValueNumber), /*minSize*/ 4)
, m_comp(comp)
, m_visited(new (comp, CMK_ValueNumber) BYTE[comp->fgBBNumMax + 1]())
{
......
Markdown is supported.
0% uploaded.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.