Unverified commit 7890ef1a authored by Jakob Botsch Nielsen, committed by GitHub

Split out and generalize tailcall IR validation/tailcall profile adjustments (#69941)

* Split the IR validation after tailcalls into a separate function.
  Previously it was intertwined with the updating of profile weights for
  follow-up blocks.
* Generalize the validation to use a tree walk and handle more cases.
  This fixes an assertion failure seen in some PGO runs. (A simplified
  model of the walk is sketched below.)
* Generalize the updating of profile weights for follow-up blocks.
  Previously this updated the profile weight of only one follow-up
  block, but there can be an arbitrary number of successor blocks due
  to inlining. (The second sketch below models the new update loop.)
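
For illustration only, a minimal standalone model of the generalized validation walk. The `Op`/`Node` types and the flat execution-order list are hypothetical stand-ins for the JIT's GenTree nodes; the real visitor is the `TailCallIRValidatorVisitor` added in this commit.

```cpp
// Illustration only: simplified stand-ins for the JIT's GenTree nodes.
#include <cassert>
#include <vector>

enum class Op { Call, Nop, Cast, AsgToLocal, LocalUse, Return };

struct Node
{
    Op  op;
    int lclNum; // local defined by AsgToLocal / read by LocalUse; unused otherwise
};

// Model of the validator: before the tail call is seen anything is accepted;
// after it, only value-preserving shapes (NOPs, no-op casts, copies through
// temp locals) may stand between the call and the return.
void ValidatePostCallShape(const std::vector<Node>& executionOrder)
{
    bool sawCall    = false;
    int  trackedLcl = -1; // -1 means the tracked value is the call result itself

    for (const Node& n : executionOrder)
    {
        if (!sawCall)
        {
            sawCall = (n.op == Op::Call);
            continue;
        }

        switch (n.op)
        {
            case Op::Nop:
            case Op::Cast: // assumed to be a no-op cast of the tracked value
                break;
            case Op::AsgToLocal: // result copied into a temp; track the temp instead
                trackedLcl = n.lclNum;
                break;
            case Op::LocalUse: // later reads must be of the currently tracked temp
                assert((n.lclNum == trackedLcl) && "use is not the propagated value");
                break;
            case Op::Return: // reached the return: the shape is valid
                return;
            case Op::Call: // another call after the tailcall is not allowed
                assert(!"unexpected node after call marked as tailcall");
                break;
        }
    }
}
```

For example, the sequence `{Call}, {AsgToLocal,5}, {LocalUse,5}, {AsgToLocal,2}, {LocalUse,2}, {Return}` passes, mirroring the ASG-chain example in the new code's comments.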
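And a minimal sketch of the generalized profile-weight update, again with hypothetical `Block`/`weight_t` stand-ins for the JIT's BasicBlock types: the tail-call block's weight is subtracted from each profiled block along the linear chain of unique successors, and a block whose weight would go negative is left inconsistent.

```cpp
// Illustration only: hypothetical stand-ins for the JIT's BasicBlock/weight_t.
#include <cstdio>

using weight_t = double;

struct Block
{
    int      num;
    weight_t weight;
    bool     hasProfile;
    Block*   uniqueSucc; // nullptr, or the single flow successor
};

// Model of the generalized update: subtract the tail-call block's weight from
// every profiled block along the linear chain of unique successors.
void PropagateWeightLoss(Block* tailCallBlock)
{
    if (!tailCallBlock->hasProfile)
    {
        return;
    }

    weight_t const weightLoss = tailCallBlock->weight;
    for (Block* next = tailCallBlock->uniqueSucc; (next != nullptr) && next->hasProfile;
         next = next->uniqueSucc)
    {
        weight_t const newWeight = next->weight - weightLoss;
        if (newWeight >= 0)
        {
            next->weight = newWeight; // local repair is possible
        }
        else
        {
            // Negative result: no local repair; leave the profile inconsistent,
            // as the real code does, and keep walking.
            std::printf("Not reducing weight of block %d\n", next->num);
        }
    }
}
```

For example, with a chain A(100) -> B(150) -> C(40) and the tail call in A, B is reduced to 50 while C stays at 40, since 40 - 100 would be negative.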

Fix #69939
Parent bc07cf2a
......
@@ -5649,6 +5649,7 @@ private:
     GenTree* getTokenHandleTree(CORINFO_RESOLVED_TOKEN* pResolvedToken, bool parent);
     GenTree* fgMorphPotentialTailCall(GenTreeCall* call);
+    void fgValidateIRForTailCall(GenTreeCall* call);
     GenTree* fgGetStubAddrArg(GenTreeCall* call);
     unsigned fgGetArgParameterLclNum(GenTreeCall* call, CallArg* arg);
     void fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCall* recursiveTailCall);
......
......
@@ -6719,9 +6719,11 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
 #endif
     }

+    fgValidateIRForTailCall(call);
+
     // If this block has a flow successor, make suitable updates.
     //
-    BasicBlock* const nextBlock = compCurBB->GetUniqueSucc();
+    BasicBlock* nextBlock = compCurBB->GetUniqueSucc();

     if (nextBlock == nullptr)
     {
......
@@ -6735,129 +6737,48 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
     //
     fgRemoveRefPred(nextBlock, compCurBB);

-    // Adjust profile weights.
+    // Adjust profile weights of the successor blocks.
     //
     // Note if this is a tail call to loop, further updates
     // are needed once we install the loop edge.
     //
-    if (compCurBB->hasProfileWeight() && nextBlock->hasProfileWeight())
+    BasicBlock* curBlock = compCurBB;
+    if (curBlock->hasProfileWeight())
     {
-        // Since we have linear flow we can update the next block weight.
-        //
-        weight_t const blockWeight   = compCurBB->bbWeight;
-        weight_t const nextWeight    = nextBlock->bbWeight;
-        weight_t const newNextWeight = nextWeight - blockWeight;
-
-        // If the math would result in a negative weight then there's
-        // no local repair we can do; just leave things inconsistent.
-        //
-        if (newNextWeight >= 0)
-        {
-            // Note if we'd already morphed the IR in nextblock we might
-            // have done something profile sensitive that we should arguably reconsider.
-            //
-            JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum,
-                    nextWeight, newNextWeight);
-
-            nextBlock->setBBProfileWeight(newNextWeight);
-        }
-        else
-        {
-            JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
-                    " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
-                    nextBlock->bbNum, nextWeight, compCurBB->bbNum, blockWeight);
-        }
-
-        // If nextBlock is not a BBJ_RETURN, it should have a unique successor that
-        // is a BBJ_RETURN, as we allow a little bit of flow after a tail call.
-        //
-        if (nextBlock->bbJumpKind != BBJ_RETURN)
-        {
-            BasicBlock* retBlock = nextBlock->GetUniqueSucc();
-
-            // Check if we have a sequence of GT_ASG blocks where the same variable is assigned
-            // to temp locals over and over.
-            // Also allow casts on the RHSs of the assignments, and blocks with GT_NOPs.
-            //
-            // { GT_ASG(t_0, GT_CALL(...)) }
-            // { GT_ASG(t_1, t0) } (with casts on rhs potentially)
-            // ...
-            // { GT_ASG(t_n, t_(n - 1)) }
-            // { GT_RET t_n }
-            //
-            if (retBlock->bbJumpKind != BBJ_RETURN)
-            {
-                // Make sure the block has a single statement
-                assert(nextBlock->firstStmt() == nextBlock->lastStmt());
-                // And the root node is "ASG(LCL_VAR, LCL_VAR)"
-                GenTree* asgNode = nextBlock->firstStmt()->GetRootNode();
-                assert(asgNode->OperIs(GT_ASG));
-
-                unsigned lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
-
-                while (retBlock->bbJumpKind != BBJ_RETURN)
-                {
-#ifdef DEBUG
-                    Statement* nonEmptyStmt = nullptr;
-                    for (Statement* const stmt : retBlock->Statements())
-                    {
-                        // Ignore NOP statements
-                        if (!stmt->GetRootNode()->OperIs(GT_NOP))
-                        {
-                            // Only a single non-NOP statement is allowed
-                            assert(nonEmptyStmt == nullptr);
-                            nonEmptyStmt = stmt;
-                        }
-                    }
-
-                    if (nonEmptyStmt != nullptr)
-                    {
-                        asgNode = nonEmptyStmt->GetRootNode();
-                        if (!asgNode->OperIs(GT_NOP))
-                        {
-                            assert(asgNode->OperIs(GT_ASG));
-
-                            GenTree* rhs = asgNode->gtGetOp2();
-                            while (rhs->OperIs(GT_CAST))
-                            {
-                                assert(!rhs->gtOverflow());
-                                rhs = rhs->gtGetOp1();
-                            }
-
-                            assert(lcl == rhs->AsLclVarCommon()->GetLclNum());
-                            lcl = asgNode->gtGetOp1()->AsLclVarCommon()->GetLclNum();
-                        }
-                    }
-#endif
-                    retBlock = retBlock->GetUniqueSucc();
-                }
-            }
-
-            assert(retBlock->bbJumpKind == BBJ_RETURN);
-
-            if (retBlock->hasProfileWeight())
-            {
-                // Do similar updates here.
-                //
-                weight_t const nextNextWeight    = retBlock->bbWeight;
-                weight_t const newNextNextWeight = nextNextWeight - blockWeight;
-
-                // If the math would result in an negative weight then there's
-                // no local repair we can do; just leave things inconsistent.
-                //
-                if (newNextNextWeight >= 0)
-                {
-                    JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n",
-                            retBlock->bbNum, nextNextWeight, newNextNextWeight);
-
-                    retBlock->setBBProfileWeight(newNextNextWeight);
-                }
-                else
-                {
-                    JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
-                            " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
-                            retBlock->bbNum, nextNextWeight, compCurBB->bbNum, blockWeight);
-                }
-            }
-        }
+        weight_t weightLoss = curBlock->bbWeight;
+
+        while (nextBlock->hasProfileWeight())
+        {
+            // Since we have linear flow we can update the next block weight.
+            //
+            weight_t const nextWeight    = nextBlock->bbWeight;
+            weight_t const newNextWeight = nextWeight - weightLoss;
+
+            // If the math would result in a negative weight then there's
+            // no local repair we can do; just leave things inconsistent.
+            //
+            if (newNextWeight >= 0)
+            {
+                // Note if we'd already morphed the IR in nextblock we might
+                // have done something profile sensitive that we should arguably reconsider.
+                //
+                JITDUMP("Reducing profile weight of " FMT_BB " from " FMT_WT " to " FMT_WT "\n", nextBlock->bbNum,
+                        nextWeight, newNextWeight);
+
+                nextBlock->setBBProfileWeight(newNextWeight);
+            }
+            else
+            {
+                JITDUMP("Not reducing profile weight of " FMT_BB " as its weight " FMT_WT
+                        " is less than direct flow pred " FMT_BB " weight " FMT_WT "\n",
+                        nextBlock->bbNum, nextWeight, compCurBB->bbNum, weightLoss);
+            }
+
+            curBlock  = nextBlock;
+            nextBlock = curBlock->GetUniqueSucc();
+            if (nextBlock == nullptr)
+            {
+                break;
+            }
+        }
     }
......
@@ -7066,6 +6987,168 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
     return result;
 }

+//------------------------------------------------------------------------
+// fgValidateIRForTailCall:
+//     Validate that the IR looks ok to perform a tailcall.
+//
+// Arguments:
+//     call - The call that we are dispatching as a tailcall.
+//
+// Notes:
+//   This function needs to handle somewhat complex IR that appears after
+//   tailcall candidates due to inlining.
+//
+void Compiler::fgValidateIRForTailCall(GenTreeCall* call)
+{
+#ifdef DEBUG
+    class TailCallIRValidatorVisitor final : public GenTreeVisitor<TailCallIRValidatorVisitor>
+    {
+        GenTreeCall* m_tailcall;
+        GenTree*     m_prevVal;
+
+    public:
+        enum
+        {
+            DoPostOrder       = true,
+            UseExecutionOrder = true,
+        };
+
+        TailCallIRValidatorVisitor(Compiler* comp, GenTreeCall* tailcall)
+            : GenTreeVisitor(comp), m_tailcall(tailcall), m_prevVal(nullptr)
+        {
+        }
+
+        fgWalkResult PostOrderVisit(GenTree** use, GenTree* user)
+        {
+            GenTree* tree = *use;
+
+            // Wait until we get to the actual call...
+            if (m_prevVal == nullptr)
+            {
+                if (tree == m_tailcall)
+                {
+                    m_prevVal = m_tailcall;
+                }
+
+                return WALK_CONTINUE;
+            }
+
+            if (tree->OperIs(GT_RETURN))
+            {
+                assert((tree->TypeIs(TYP_VOID) || ValidateUse(tree->gtGetOp1())) &&
+                       "Expected return to be result of tailcall");
+                return WALK_ABORT;
+            }
+
+            // GT_NOP might appear due to assignments that end up as
+            // self-assignments, which get morphed to GT_NOP.
+            if (tree->OperIs(GT_NOP))
+            {
+            }
+            // No-op casts may appear due to normalization during inlining. Example:
+            //
+            //    *  RETURN    int
+            //    \--*  CAST      int <- bool <- int
+            //       \--*  CALL      int    Attribute.IsDefined (with gtReturnType = TYP_BOOL)
+            //          +--*  LCL_VAR   ref    V00 arg0
+            //          +--*  LCL_VAR   ref    V01 arg1
+            //          \--*  CNS_INT   int    1
+            //
+            else if (tree->OperIs(GT_CAST))
+            {
+                assert(ValidateUse(tree->AsCast()->CastOp()) && "Expected cast op to be from result of tailcall");
+                assert((tree->AsCast()->gtCastType == m_tailcall->gtReturnType) &&
+                       "Expected cast after tailcall to be no-op");
+
+                m_prevVal = tree;
+            }
+            // We might see arbitrary chains of assignments that trivially
+            // propagate the result. Example:
+            //
+            //    *  ASG       ref
+            //    +--*  LCL_VAR   ref    V05 tmp5
+            //    \--*  CALL      ref    CultureInfo.InitializeUserDefaultUICulture
+            //    (in a new statement/BB)
+            //    *  ASG       ref
+            //    +--*  LCL_VAR   ref    V02 tmp2
+            //    \--*  LCL_VAR   ref    V05 tmp5
+            //    (in a new statement/BB)
+            //    *  RETURN    ref
+            //    \--*  LCL_VAR   ref    V02 tmp2
+            //
+            else if (tree->OperIs(GT_ASG))
+            {
+                assert(tree->gtGetOp1()->OperIs(GT_LCL_VAR) && ValidateUse(tree->gtGetOp2()) &&
+                       "Expected LHS of assignment to be local and RHS of assignment to be result of tailcall");
+
+                m_prevVal = tree->gtGetOp1();
+            }
+            else if (tree->OperIs(GT_LCL_VAR))
+            {
+                assert((ValidateUse(tree) || (user->OperIs(GT_ASG) && (user->gtGetOp1() == tree))) &&
+                       "Expected use of local to be tailcall value or LHS of assignment");
+            }
+            else
+            {
+                DISPTREE(tree);
+                assert(!"Unexpected tree op after call marked as tailcall");
+            }
+
+            return WALK_CONTINUE;
+        }
+
+        bool ValidateUse(GenTree* node)
+        {
+            if (m_prevVal->OperIs(GT_LCL_VAR))
+            {
+                return node->OperIs(GT_LCL_VAR) &&
+                       (node->AsLclVar()->GetLclNum() == m_prevVal->AsLclVar()->GetLclNum());
+            }
+            else if (m_prevVal == m_tailcall)
+            {
+                if (node == m_tailcall)
+                {
+                    return true;
+                }
+
+                // If we do not use the call value directly we might have
+                // passed this function's ret buffer arg, so verify that is
+                // being used.
+                CallArg* retBufferArg = m_tailcall->gtArgs.GetRetBufferArg();
+                if (retBufferArg != nullptr)
+                {
+                    GenTree* retBufferNode = retBufferArg->GetNode();
+                    return retBufferNode->OperIs(GT_LCL_VAR) &&
+                           (retBufferNode->AsLclVar()->GetLclNum() == m_compiler->info.compRetBuffArg) &&
+                           node->OperIs(GT_LCL_VAR) &&
+                           (node->AsLclVar()->GetLclNum() == m_compiler->info.compRetBuffArg);
+                }
+
+                return false;
+            }
+            else
+            {
+                return node == m_prevVal;
+            }
+        }
+    };
+
+    TailCallIRValidatorVisitor visitor(this, call);
+    for (Statement* stmt = compCurStmt; stmt != nullptr; stmt = stmt->GetNextStmt())
+    {
+        visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
+    }
+
+    BasicBlock* bb = compCurBB;
+    while (!bb->KindIs(BBJ_RETURN))
+    {
+        bb = bb->GetUniqueSucc();
+        assert((bb != nullptr) && "Expected straight flow after tailcall");
+
+        for (Statement* stmt : bb->Statements())
+        {
+            visitor.WalkTree(stmt->GetRootNodePointer(), nullptr);
+        }
+    }
+#endif
+}
+
 //------------------------------------------------------------------------
 // fgMorphTailCallViaHelpers: Transform the given GT_CALL tree for tailcall code
 // generation.
......