Commit d25575f4 authored by Carol Eidt
Parent 5b4d1ca1
@@ -2399,7 +2399,7 @@ void Lowering::LowerCompare(GenTree* cmp)
GenTreeIntCon* op2 = cmp->gtGetOp2()->AsIntCon();
ssize_t op2Value = op2->IconValue();
if (m_lsra->isContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
if (IsContainableMemoryOp(op1) && varTypeIsSmall(op1Type) && genTypeCanRepresentValue(op1Type, op2Value))
{
//
// If op1's type is small then try to narrow op2 so it has the same type as op1.
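As a rough, hypothetical illustration of what this narrowing buys (a made-up C++ analogue, not part of the commit; the exact instructions depend on the compiler and target):

    #include <cstdint>

    struct Flags
    {
        uint8_t state;
    };

    // The constant 3 is representable in the operand's small type (uint8_t),
    // so the constant can be narrowed and the compare done directly against
    // memory (roughly "cmp byte ptr [rdi], 3") instead of loading and
    // widening the field first.
    bool IsReady(const Flags* f)
    {
        return f->state == 3;
    }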
@@ -2429,7 +2429,7 @@ void Lowering::LowerCompare(GenTree* cmp)
// the result of bool returning calls.
//
if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || m_lsra->isContainableMemoryOp(castOp))
if (castOp->OperIs(GT_CALL, GT_LCL_VAR) || castOp->OperIsLogical() || IsContainableMemoryOp(castOp))
{
assert(!castOp->gtOverflowEx()); // Must not be an overflow checking operation
@@ -2491,7 +2491,7 @@ void Lowering::LowerCompare(GenTree* cmp)
andOp1->ClearContained();
andOp2->ClearContained();
if (m_lsra->isContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
if (IsContainableMemoryOp(andOp1) && andOp2->IsIntegralConst())
{
//
// For "test" we only care about the bits that are set in the second operand (mask).
@@ -5404,7 +5404,7 @@ void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
// everything is made explicit by adding casts.
assert(dividend->TypeGet() == divisor->TypeGet());
if (m_lsra->isContainableMemoryOp(divisor) || divisor->IsCnsNonZeroFltOrDbl())
if (IsContainableMemoryOp(divisor) || divisor->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(node, divisor);
}
@@ -5426,7 +5426,7 @@ void Lowering::ContainCheckDivOrMod(GenTreeOp* node)
#endif
// divisor can be an r/m, but the memory indirection must be of the same size as the divide
if (m_lsra->isContainableMemoryOp(divisor) && (divisor->TypeGet() == node->TypeGet()))
if (IsContainableMemoryOp(divisor) && (divisor->TypeGet() == node->TypeGet()))
{
MakeSrcContained(node, divisor);
}
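A hypothetical example of the size requirement noted above (made-up function, not from the commit):

    #include <cstdint>

    // The 32-bit divisor loaded from memory matches the 32-bit divide, so it
    // can be consumed directly from memory (roughly "idiv dword ptr [rsi]").
    // A narrower field would first need a widening load into a register.
    int32_t Divide(int32_t dividend, const int32_t* divisor)
    {
        return dividend / *divisor;
    }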
......
@@ -328,6 +328,12 @@ private:
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);
// Return true if 'node' is a containable memory op.
bool IsContainableMemoryOp(GenTree* node)
{
return m_lsra->isContainableMemoryOp(node);
}
// Makes 'childNode' contained in the 'parentNode'
void MakeSrcContained(GenTreePtr parentNode, GenTreePtr childNode);
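The accessor above is the heart of the change: call sites in Lowering stop reaching through m_lsra directly. A minimal standalone sketch of that delegation pattern, with simplified hypothetical types (not the actual JIT sources):

    // Simplified stand-ins for LinearScan and Lowering.
    class RegisterAllocator
    {
    public:
        bool isContainableMemoryOp(const void* node) const
        {
            return node != nullptr; // placeholder policy
        }
    };

    class Lowering
    {
        RegisterAllocator* m_lsra = nullptr;

    public:
        // Thin forwarding wrapper: callers ask Lowering instead of reaching
        // through m_lsra, so the containment query has a single seam.
        bool IsContainableMemoryOp(const void* node)
        {
            return m_lsra->isContainableMemoryOp(node);
        }
    };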
......
@@ -1534,11 +1534,11 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
{
assert(node->OperGet() == GT_MUL);
if (m_lsra->isContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(node, op2);
}
else if (op1->IsCnsNonZeroFltOrDbl() || (m_lsra->isContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)))
else if (op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1)))
{
// Since GT_MUL is commutative, we will try to re-order operands if it is safe to
// generate more efficient code sequence for the case of GT_MUL(op1=memOp, op2=non-memOp)
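A hypothetical illustration of why the reorder helps (made-up function; exact codegen varies):

    #include <cstdint>

    // Multiplication is commutative, so the operands can be swapped to put
    // the memory value in the r/m slot, letting the multiply read it directly
    // (roughly "imul eax, dword ptr [rdi]") instead of loading it first.
    int32_t Scale(const int32_t* factor, int32_t value)
    {
        return *factor * value;
    }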
@@ -1602,7 +1602,7 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
}
MakeSrcContained(node, imm); // The imm is always contained
if (m_lsra->isContainableMemoryOp(other))
if (IsContainableMemoryOp(other))
{
memOp = other; // memOp may be contained below
}
@@ -1615,11 +1615,11 @@ void Lowering::ContainCheckMul(GenTreeOp* node)
//
if (memOp == nullptr)
{
if (m_lsra->isContainableMemoryOp(op2) && (op2->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op2))
if (IsContainableMemoryOp(op2) && (op2->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op2))
{
memOp = op2;
}
else if (m_lsra->isContainableMemoryOp(op1) && (op1->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op1))
else if (IsContainableMemoryOp(op1) && (op1->TypeGet() == node->TypeGet()) && IsSafeToContainMem(node, op1))
{
memOp = op1;
}
@@ -1761,7 +1761,7 @@ void Lowering::ContainCheckCast(GenTreeCast* node)
// U8 -> R8 conversion requires that the operand be in a register.
if (srcType != TYP_ULONG)
{
if (m_lsra->isContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
if (IsContainableMemoryOp(castOp) || castOp->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(node, castOp);
}
@@ -1836,7 +1836,7 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp)
{
MakeSrcContained(cmp, otherOp);
}
else if (m_lsra->isContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(cmp, otherOp)))
else if (IsContainableMemoryOp(otherOp) && ((otherOp == op2) || IsSafeToContainMem(cmp, otherOp)))
{
MakeSrcContained(cmp, otherOp);
}
@@ -1859,7 +1859,7 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp)
// we can treat the MemoryOp as contained.
if (op1Type == op2Type)
{
if (m_lsra->isContainableMemoryOp(op1))
if (IsContainableMemoryOp(op1))
{
MakeSrcContained(cmp, op1);
}
@@ -1909,11 +1909,11 @@ void Lowering::ContainCheckCompare(GenTreeOp* cmp)
// Note that TEST does not have a r,rm encoding like CMP has but we can still
// contain the second operand because the emitter maps both r,rm and rm,r to
// the same instruction code. This avoids the need to special case TEST here.
if (m_lsra->isContainableMemoryOp(op2))
if (IsContainableMemoryOp(op2))
{
MakeSrcContained(cmp, op2);
}
else if (m_lsra->isContainableMemoryOp(op1) && IsSafeToContainMem(cmp, op1))
else if (IsContainableMemoryOp(op1) && IsSafeToContainMem(cmp, op1))
{
MakeSrcContained(cmp, op1);
}
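A hypothetical example of the TEST containment described above (made-up function; the instruction is shown only as a rough idea):

    #include <cstdint>

    // An AND feeding a compare against zero typically lowers to a single TEST.
    // Because the emitter maps TEST's r,rm and rm,r forms to one encoding, the
    // memory operand can be contained on either side of the AND
    // (roughly "test dword ptr [rsi], edi").
    bool AnyCommonBits(uint32_t mask, const uint32_t* value)
    {
        return (mask & *value) != 0;
    }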
@@ -1997,7 +1997,7 @@ bool Lowering::LowerRMWMemOp(GenTreeIndir* storeInd)
// On Xarch RMW operations require the source to be an immediate or in a register.
// Therefore, if we have previously marked the indirOpSource as contained while lowering
// the binary node, we need to reset that now.
if (m_lsra->isContainableMemoryOp(indirOpSource))
if (IsContainableMemoryOp(indirOpSource))
{
indirOpSource->ClearContained();
}
@@ -2103,7 +2103,7 @@ void Lowering::ContainCheckBinary(GenTreeOp* node)
if (!binOpInRMW)
{
const unsigned operatorSize = genTypeSize(node->TypeGet());
if (m_lsra->isContainableMemoryOp(op2) && (genTypeSize(op2->TypeGet()) == operatorSize))
if (IsContainableMemoryOp(op2) && (genTypeSize(op2->TypeGet()) == operatorSize))
{
directlyEncodable = true;
operand = op2;
@@ -2111,7 +2111,7 @@ void Lowering::ContainCheckBinary(GenTreeOp* node)
else if (node->OperIsCommutative())
{
if (IsContainableImmed(node, op1) ||
(m_lsra->isContainableMemoryOp(op1) && (genTypeSize(op1->TypeGet()) == operatorSize) &&
(IsContainableMemoryOp(op1) && (genTypeSize(op1->TypeGet()) == operatorSize) &&
IsSafeToContainMem(node, op1)))
{
// If it is safe, we can reverse the order of operands of commutative operations for efficient
@@ -2155,7 +2155,7 @@ void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
{
other = node->gtIndex;
}
else if (m_lsra->isContainableMemoryOp(node->gtIndex))
else if (IsContainableMemoryOp(node->gtIndex))
{
other = node->gtIndex;
}
@@ -2166,7 +2166,7 @@ void Lowering::ContainCheckBoundsChk(GenTreeBoundsChk* node)
if (node->gtIndex->TypeGet() == node->gtArrLen->TypeGet())
{
if (m_lsra->isContainableMemoryOp(other))
if (IsContainableMemoryOp(other))
{
MakeSrcContained(node, other);
}
@@ -2190,7 +2190,7 @@ void Lowering::ContainCheckIntrinsic(GenTreeOp* node)
if (node->gtIntrinsic.gtIntrinsicId == CORINFO_INTRINSIC_Sqrt)
{
GenTree* op1 = node->gtGetOp1();
if (m_lsra->isContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
if (IsContainableMemoryOp(op1) || op1->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(node, op1);
}
@@ -2288,7 +2288,7 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
// If the index is a constant, mark it as contained.
CheckImmedAndMakeContained(simdNode, op2);
if (m_lsra->isContainableMemoryOp(op1))
if (IsContainableMemoryOp(op1))
{
MakeSrcContained(simdNode, op1);
if (op1->OperGet() == GT_IND)
@@ -2331,12 +2331,12 @@ void Lowering::ContainCheckFloatBinary(GenTreeOp* node)
// everything is made explicit by adding casts.
assert(op1->TypeGet() == op2->TypeGet());
if (m_lsra->isContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
if (IsContainableMemoryOp(op2) || op2->IsCnsNonZeroFltOrDbl())
{
MakeSrcContained(node, op2);
}
else if (node->OperIsCommutative() &&
(op1->IsCnsNonZeroFltOrDbl() || (m_lsra->isContainableMemoryOp(op1) && IsSafeToContainMem(node, op1))))
(op1->IsCnsNonZeroFltOrDbl() || (IsContainableMemoryOp(op1) && IsSafeToContainMem(node, op1))))
{
// Though we have GT_ADD(op1=memOp, op2=non-memOp), we try to reorder the operands
// as long as it is safe so that the following efficient code sequence is generated:
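A hypothetical floating-point counterpart of the reorder above (made-up function; exact codegen varies):

    // With the memory operand moved into the r/m position, an SSE add can
    // read it directly (roughly "addss xmm0, dword ptr [rdi]") instead of
    // issuing a separate load.
    float Accumulate(const float* addend, float total)
    {
        return *addend + total;
    }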
......
@@ -40,8 +40,8 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
//
void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
GenTree* op1 = tree->gtGetOp1();
TreeNodeInfo* info = &(tree->gtLsraInfo);
GenTree* op1 = tree->gtGetOp1();
assert(info->dstCount == 0);
if (tree->TypeGet() == TYP_LONG)
@@ -103,7 +103,7 @@ void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
void LinearScan::TreeNodeInfoInitLclHeap(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
TreeNodeInfo* info = &(tree->gtLsraInfo);
assert(info->dstCount == 1);
......
@@ -683,7 +683,7 @@ void LinearScan::TreeNodeInfoInit(GenTree* tree)
//
void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
TreeNodeInfo* info = &(tree->gtLsraInfo);
GenTree* op1 = tree->gtGetOp1();
regMaskTP useCandidates = RBM_NONE;
......
@@ -781,9 +781,9 @@ void LinearScan::TreeNodeInfoInitPutArgSplit(GenTreePutArgSplit* argNode)
//
void LinearScan::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
unsigned size = blkNode->gtBlkSize;
GenTree* source = blkNode->Data();
GenTree* dstAddr = blkNode->Addr();
unsigned size = blkNode->gtBlkSize;
GenTree* source = blkNode->Data();
// Sources are dest address and initVal or source.
// We may require an additional source or temp register for the size.
......
@@ -709,7 +709,7 @@ void LinearScan::TreeNodeInfoInit(GenTree* tree)
delayUseSrc = op1;
}
else if ((op2 != nullptr) &&
(!tree->OperIsCommutative() || (isContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
(!tree->OperIsCommutative() || (isContainableMemoryOp(op2) && (op2->gtLsraInfo.srcCount == 0))))
{
delayUseSrc = op2;
}
@@ -838,19 +838,19 @@ bool LinearScan::isRMWRegOper(GenTreePtr tree)
switch (tree->OperGet())
{
// These Opers either support a three op form (i.e. GT_LEA), or do not read/write their first operand
case GT_LEA:
case GT_STOREIND:
case GT_ARR_INDEX:
case GT_STORE_BLK:
case GT_STORE_OBJ:
return false;
case GT_LEA:
case GT_STOREIND:
case GT_ARR_INDEX:
case GT_STORE_BLK:
case GT_STORE_OBJ:
return false;
// x86/x64 does support a three op multiply when op2|op1 is a contained immediate
case GT_MUL:
return (!tree->gtOp.gtOp2->isContainedIntOrIImmed() && !tree->gtOp.gtOp1->isContainedIntOrIImmed());
case GT_MUL:
return (!tree->gtOp.gtOp2->isContainedIntOrIImmed() && !tree->gtOp.gtOp1->isContainedIntOrIImmed());
default:
return true;
default:
return true;
}
}
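A hypothetical illustration of the read-modify-write distinction this switch encodes (made-up function, not from the commit):

    #include <cstdint>

    // A two-operand x64 add is read-modify-write: "add eax, ecx" overwrites
    // its first source, so that source must stay live up to the definition
    // point. LEA-style address math ("lea eax, [rcx+rdx*8]") is the contrast:
    // its destination is independent of its sources.
    int32_t Sum(int32_t a, int32_t b)
    {
        return a + b; // typically emitted as an RMW add on x64
    }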
@@ -902,8 +902,8 @@ void LinearScan::TreeNodeInfoInitSimple(GenTree* tree)
//
void LinearScan::TreeNodeInfoInitReturn(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
GenTree* op1 = tree->gtGetOp1();
TreeNodeInfo* info = &(tree->gtLsraInfo);
GenTree* op1 = tree->gtGetOp1();
#if !defined(_TARGET_64BIT_)
if (tree->TypeGet() == TYP_LONG)
@@ -1365,9 +1365,9 @@ void LinearScan::TreeNodeInfoInitCall(GenTreeCall* call)
//
void LinearScan::TreeNodeInfoInitBlockStore(GenTreeBlk* blkNode)
{
GenTree* dstAddr = blkNode->Addr();
unsigned size = blkNode->gtBlkSize;
GenTree* source = blkNode->Data();
GenTree* dstAddr = blkNode->Addr();
unsigned size = blkNode->gtBlkSize;
GenTree* source = blkNode->Data();
// Sources are dest address, initVal or source.
// We may require an additional source or temp register for the size.
@@ -1754,7 +1754,7 @@ void LinearScan::TreeNodeInfoInitLclHeap(GenTree* tree)
//
void LinearScan::TreeNodeInfoInitLclHeap(GenTree* tree)
{
TreeNodeInfo* info = &(tree->gtLsraInfo);
TreeNodeInfo* info = &(tree->gtLsraInfo);
info->srcCount = 1;
assert(info->dstCount == 1);
@@ -2203,7 +2203,8 @@ void LinearScan::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
}
else
{
assert(simdTree->gtSIMDBaseType == TYP_INT && compiler->getSIMDInstructionSet() >= InstructionSet_SSE3_4);
assert(simdTree->gtSIMDBaseType == TYP_INT &&
compiler->getSIMDInstructionSet() >= InstructionSet_SSE3_4);
// No need to set isInternalRegDelayFree since targetReg is an
// int type reg and guaranteed to be different from xmm/ymm
@@ -2358,7 +2359,8 @@ void LinearScan::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
}
else
#endif
if ((compiler->getSIMDInstructionSet() == InstructionSet_AVX) || (simdTree->gtSIMDBaseType == TYP_ULONG))
if ((compiler->getSIMDInstructionSet() == InstructionSet_AVX) ||
(simdTree->gtSIMDBaseType == TYP_ULONG))
{
info->internalFloatCount = 2;
}
......