// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                               Lower                                       XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#ifndef _LOWER_H_
#define _LOWER_H_

#include "compiler.h"
#include "phase.h"
#include "lsra.h"
#include "sideeffects.h"

class Lowering final : public Phase
{
public:
    inline Lowering(Compiler* compiler, LinearScanInterface* lsra)
        : Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM)
    {
        m_lsra = (LinearScan*)lsra;
        assert(m_lsra);
    }

    virtual PhaseStatus DoPhase() override;

    // This variant of LowerRange is called from outside of the main Lowering pass,
    // so it creates its own instance of Lowering to do so.
    void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range)
    {
        Lowering lowerer(comp, m_lsra);
        lowerer.m_block = block;

        lowerer.LowerRange(range);
    }

private:
    // LowerRange handles new code that is introduced by or after Lowering.
    void LowerRange(LIR::ReadOnlyRange& range)
    {
        for (GenTree* newNode : range)
        {
            LowerNode(newNode);
        }
    }
    void LowerRange(GenTree* firstNode, GenTree* lastNode)
    {
        LIR::ReadOnlyRange range(firstNode, lastNode);
        LowerRange(range);
    }

    // ContainCheckRange handles new code that is introduced by or after Lowering,
    // and that is known to be already in Lowered form.
    void ContainCheckRange(LIR::ReadOnlyRange& range)
    {
        for (GenTree* newNode : range)
        {
            ContainCheckNode(newNode);
        }
    }
    void ContainCheckRange(GenTree* firstNode, GenTree* lastNode)
    {
        LIR::ReadOnlyRange range(firstNode, lastNode);
        ContainCheckRange(range);
    }

    void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree)
    {
        LIR::Range range = LIR::SeqTree(comp, tree);
        ContainCheckRange(range);
        BlockRange().InsertBefore(insertionPoint, std::move(range));
    }

    void ContainCheckNode(GenTree* node);

    void ContainCheckDivOrMod(GenTreeOp* node);
    void ContainCheckReturnTrap(GenTreeOp* node);
    void ContainCheckArrOffset(GenTreeArrOffs* node);
    void ContainCheckLclHeap(GenTreeOp* node);
    void ContainCheckRet(GenTreeUnOp* ret);
    void ContainCheckJTrue(GenTreeOp* node);

    void ContainCheckBitCast(GenTree* node);
    void ContainCheckCallOperands(GenTreeCall* call);
    void ContainCheckIndir(GenTreeIndir* indirNode);
    void ContainCheckStoreIndir(GenTreeStoreInd* indirNode);
    void ContainCheckMul(GenTreeOp* node);
    void ContainCheckShiftRotate(GenTreeOp* node);
    void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const;
    void ContainCheckCast(GenTreeCast* node);
    void ContainCheckCompare(GenTreeOp* node);
    void ContainCheckBinary(GenTreeOp* node);
    void ContainCheckBoundsChk(GenTreeBoundsChk* node);
#ifdef TARGET_XARCH
    void ContainCheckFloatBinary(GenTreeOp* node);
    void ContainCheckIntrinsic(GenTreeOp* node);
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
    void ContainCheckSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr);
    void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node);
#endif // FEATURE_HW_INTRINSICS

#ifdef DEBUG
    static void CheckCallArg(GenTree* arg);
    static void CheckCall(GenTreeCall* call);
    static void CheckNode(Compiler* compiler, GenTree* node);
    static bool CheckBlock(Compiler* compiler, BasicBlock* block);
#endif // DEBUG

    void LowerBlock(BasicBlock* block);
    GenTree* LowerNode(GenTree* node);

    // ------------------------------
    // Call Lowering
    // ------------------------------
    void LowerCall(GenTree* call);
#ifndef TARGET_64BIT
    GenTree* DecomposeLongCompare(GenTree* cmp);
#endif
    GenTree* OptimizeConstCompare(GenTree* cmp);
    GenTree* LowerCompare(GenTree* cmp);
    GenTree* LowerJTrue(GenTreeOp* jtrue);
    GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
    void LowerJmpMethod(GenTree* jmp);
    void LowerRet(GenTreeUnOp* ret);
    void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
    void LowerRetStruct(GenTreeUnOp* ret);
    void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret);
    void LowerCallStruct(GenTreeCall* call);
    void LowerStoreSingleRegCallStruct(GenTreeBlk* store);
#if !defined(WINDOWS_AMD64_ABI)
    GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const;
#endif // WINDOWS_AMD64_ABI
    GenTree* LowerDelegateInvoke(GenTreeCall* call);
    GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
    GenTree* LowerDirectCall(GenTreeCall* call);
    GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call);
    GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget);
    void LowerFastTailCall(GenTreeCall* callNode);
    void RehomeArgForFastTailCall(unsigned int lclNum,
                                  GenTree*     insertTempBefore,
                                  GenTree*     lookForUsesStart,
                                  GenTreeCall* callNode);
    void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint);
    GenTree* LowerVirtualVtableCall(GenTreeCall* call);
    GenTree* LowerVirtualStubCall(GenTreeCall* call);
    void LowerArgsForCall(GenTreeCall* call);
    void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode);
    GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type);
    void LowerArg(GenTreeCall* call, GenTree** ppTree);
#ifdef TARGET_ARMARCH
    GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info);
    GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum);
#endif

    void InsertPInvokeCallProlog(GenTreeCall* call);
    void InsertPInvokeCallEpilog(GenTreeCall* call);
    void InsertPInvokeMethodProlog();
    void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr));
    GenTree* SetGCState(int cns);
    GenTree* CreateReturnTrapSeq();
    enum FrameLinkAction
    {
        PushFrame,
        PopFrame
    };
    GenTree* CreateFrameLinkUpdate(FrameLinkAction);
    GenTree* AddrGen(ssize_t addr);
    GenTree* AddrGen(void* addr);

    GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL)
    {
        return comp->gtNewOperNode(GT_IND, type, tree);
    }

    GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL)
    {
        return comp->gtNewPhysRegNode(reg, type);
    }

    GenTree* ThisReg(GenTreeCall* call)
    {
        return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF);
    }

    GenTree* Offset(GenTree* base, unsigned offset)
    {
        var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
        return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
    }

    GenTree* OffsetByIndex(GenTree* base, GenTree* index)
    {
        var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
        return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0);
    }

    GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale)
    {
        var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
        return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0);
    }

    // Replace the definition of the given use with a lclVar, allocating a new temp
    // if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node.
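    //
    // Illustrative sketch (IR shapes are approximate): given a use whose definition is
    // "t = ADD(x, y)", this stores the definition to a temp ("STORE_LCL_VAR<tmp> = ADD(x, y)"),
    // rewrites the use to read "LCL_VAR<tmp>", and then contain-checks and lowers the new nodes.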
    GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM)
    {
        GenTree* oldUseNode = use.Def();
        if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM))
        {
            GenTree* assign;
            use.ReplaceWithLclVar(comp, tempNum, &assign);

            GenTree* newUseNode = use.Def();
            ContainCheckRange(oldUseNode->gtNext, newUseNode);

            // We need to lower the LclVar and assignment since there may be certain
            // types or scenarios, such as TYP_SIMD12, that need special handling

            LowerNode(assign);
            LowerNode(newUseNode);

            return newUseNode->AsLclVar();
        }
        return oldUseNode->AsLclVar();
    }

    // return true if this call target is within range of a pc-rel call on the machine
    bool IsCallTargetInRange(void* addr);

#if defined(TARGET_XARCH)
    GenTree* PreferredRegOptionalOperand(GenTree* tree);

    // ------------------------------------------------------------------
    // SetRegOptionalForBinOp - Indicates which operands of a binary op have
    // an optional register requirement. The xarch instruction set allows
    // either op1 or op2 of a binary operation (e.g. add, mul, etc.) to be
    // a memory operand. This routine tells the register allocator which of
    // the operands optionally requires a register. LSRA might not allocate
    // a register to the RefTypeUse positions of such operands if that is
    // beneficial, in which case codegen will treat them as memory operands.
    //
    // Arguments:
    //     tree              - GenTree of a binary operation.
    //     isSafeToMarkOp1   - True if it's safe to mark op1 as register optional.
    //     isSafeToMarkOp2   - True if it's safe to mark op2 as register optional.
    //
    //     The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2
    //     by calling IsSafeToContainMem.
    //
    // Note: On xarch at most one of the operands will be marked as
    // reg optional, even when both operands could be considered register
    // optional.
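    //
    // Illustrative example: for a commutative "ADD(op1, op2)" where both operands are safe to
    // contain and match the operation's size, only the operand chosen by
    // PreferredRegOptionalOperand is marked reg-optional; if LSRA then leaves that operand
    // without a register, codegen can consume it directly from memory (e.g. "add reg, [mem]").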
    void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2)
    {
        assert(GenTree::OperIsBinary(tree->OperGet()));

        GenTree* const op1 = tree->gtGetOp1();
        GenTree* const op2 = tree->gtGetOp2();

        const unsigned operatorSize = genTypeSize(tree->TypeGet());

        const bool op1Legal =
            isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet()));
        const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet()));

        GenTree* regOptionalOperand = nullptr;
        if (op1Legal)
        {
            regOptionalOperand = op2Legal ? PreferredRegOptionalOperand(tree) : op1;
        }
        else if (op2Legal)
        {
            regOptionalOperand = op2;
        }
        if (regOptionalOperand != nullptr)
        {
            regOptionalOperand->SetRegOptional();
        }
    }
#endif // defined(TARGET_XARCH)

    // Per tree node member functions
    void LowerStoreIndirCommon(GenTreeStoreInd* ind);
    void LowerIndir(GenTreeIndir* ind);
    void LowerStoreIndir(GenTreeStoreInd* node);
    GenTree* LowerAdd(GenTreeOp* node);
    bool LowerUnsignedDivOrMod(GenTreeOp* divMod);
    GenTree* LowerConstIntDivOrMod(GenTree* node);
    GenTree* LowerSignedDivOrMod(GenTree* node);
    void LowerBlockStore(GenTreeBlk* blkNode);
    void LowerBlockStoreCommon(GenTreeBlk* blkNode);
    void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr);
    void LowerPutArgStk(GenTreePutArgStk* tree);

    bool TryCreateAddrMode(GenTree* addr, bool isContainable);

    bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode);

    GenTree* LowerSwitch(GenTree* node);
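    // TryLowerSwitchToBitTest: sketch of the intent - when the jump table distinguishes only
    // two target blocks and the case range fits in a machine word, the switch may be lowered
    // to a bit test instead of an indirect jump; returns true if the transformation was done
    // (see the implementation in lower.cpp for the exact conditions).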
    bool TryLowerSwitchToBitTest(
        BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue);

    void LowerCast(GenTree* node);

#if !CPU_LOAD_STORE_ARCH
    bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd);
    bool IsBinOpInRMWStoreInd(GenTree* tree);
    bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource);
    bool LowerRMWMemOp(GenTreeIndir* storeInd);
#endif

    void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node);
    bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc);
    void LowerStoreLoc(GenTreeLclVarCommon* tree);
    GenTree* LowerArrElem(GenTree* node);
    void LowerRotate(GenTree* tree);
    void LowerShift(GenTreeOp* shift);
#ifdef FEATURE_SIMD
    void LowerSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    void LowerHWIntrinsic(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition);
    void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp);
    void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
    void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicGetElement(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicWithElement(GenTreeHWIntrinsic* node);
#elif defined(TARGET_ARM64)
    bool IsValidConstForMovImm(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node);
#endif // !TARGET_XARCH && !TARGET_ARM64

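    // VectorConstant: a 32-byte buffer that can be viewed as elements of any supported base
    // type; used below (e.g. by HandleArgForHWIntrinsicCreate) to accumulate the element
    // values of a constant vector.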
    union VectorConstant {
        int8_t   i8[32];
        uint8_t  u8[32];
        int16_t  i16[16];
        uint16_t u16[16];
        int32_t  i32[8];
        uint32_t u32[8];
        int64_t  i64[4];
        uint64_t u64[4];
        float    f32[8];
        double   f64[4];
    };

    //----------------------------------------------------------------------------------------------
    // VectorConstantIsBroadcastedI64: Check N i64 elements in a constant vector for equality
    //
    //  Arguments:
    //     vecCns  - Constant vector
    //     count   - Number of i64 elements to compare
    //
    //  Returns:
    //     true if N i64 elements of the given vector are equal
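    //
    //  Example (illustrative): for a vector constant whose i64 view is { 5, 5, 5, 5 } and
    //  count == 4 this returns true, which callers may use to emit a broadcast of the single
    //  element rather than materializing the full constant.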
    static bool VectorConstantIsBroadcastedI64(VectorConstant& vecCns, int count)
    {
        assert(count >= 1 && count <= 4);
        for (int i = 1; i < count; i++)
        {
            if (vecCns.i64[i] != vecCns.i64[0])
            {
                return false;
            }
        }
        return true;
    }

    //----------------------------------------------------------------------------------------------
    // HandleArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method
    //
    //  Arguments:
    //     arg      - The argument to process
    //     argIdx   - The index of the argument being processed
    //     vecCns   - The vector constant being constructed
    //     baseType - The base type of the vector constant
    //
    //  Returns:
    //     true if arg was a constant; otherwise, false
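    //
    //  Example (illustrative): for baseType TYP_FLOAT, argIdx 2, and arg being the constant
    //  3.0f, this sets vecCns.f32[2] = 3.0f and returns true; for a non-constant arg it
    //  leaves the (pre-zeroed) slot unchanged and returns false.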
    static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType)
    {
        switch (baseType)
        {
            case TYP_BYTE:
            case TYP_UBYTE:
            {
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i8[argIdx] == 0);
                }
                break;
            }

            case TYP_SHORT:
            case TYP_USHORT:
            {
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i16[argIdx] == 0);
                }
                break;
            }

            case TYP_INT:
            case TYP_UINT:
            {
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i32[argIdx] == 0);
                }
                break;
            }

            case TYP_LONG:
            case TYP_ULONG:
            {
#if defined(TARGET_64BIT)
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
#else
                if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI())
                {
                    // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT
                    // We need to reconstruct the 64-bit value in order to handle this

                    INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal;
                    gtLconVal <<= 32;
                    gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal;

                    vecCns.i64[argIdx] = gtLconVal;
                    return true;
                }
#endif // TARGET_64BIT
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i64[argIdx] == 0);
                }
                break;
            }

            case TYP_FLOAT:
            {
                if (arg->IsCnsFltOrDbl())
                {
                    vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    // We check against the i32, rather than f32, to account for -0.0
                    assert(vecCns.i32[argIdx] == 0);
                }
                break;
            }

            case TYP_DOUBLE:
            {
                if (arg->IsCnsFltOrDbl())
                {
                    vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    // We check against the i64, rather than f64, to account for -0.0
                    assert(vecCns.i64[argIdx] == 0);
                }
                break;
            }

            default:
            {
                unreached();
            }
        }

        return false;
    }
#endif // FEATURE_HW_INTRINSICS

    //----------------------------------------------------------------------------------------------
    // TryRemoveCastIfPresent: Removes op if it is a cast operation and the size of its input is
    //                         at least the size of expectedType
    //
    //  Arguments:
    //     expectedType - The expected type of the cast operation input if it is to be removed
    //     op           - The tree to remove if it is a cast op whose input is at least the size of expectedType
    //
    //  Returns:
    //     op if it was not a cast node or if its input is not at least the size of expectedType;
    //     otherwise, the underlying operation that was being cast
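    //
    //  Example (illustrative): with expectedType == TYP_SHORT and op being a cast of an
    //  int-typed node x down to short, the cast input is at least as wide as TYP_SHORT, so
    //  the cast node is removed from the block range and x is returned instead.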
    GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op)
    {
        if (!op->OperIs(GT_CAST))
        {
            return op;
        }

        GenTree* castOp = op->AsCast()->CastOp();

        if (genTypeSize(castOp->gtType) >= genTypeSize(expectedType))
        {
            BlockRange().Remove(op);
            return castOp;
        }

        return op;
    }

    // Utility functions
public:
    static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB);

    // Return true if 'childNode' is an immediate that can be contained
    // by the 'parentNode' (i.e. folded into an instruction),
    // for example if it is small enough and non-relocatable.
    bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const;

    // Return true if 'node' is a containable memory op.
    bool IsContainableMemoryOp(GenTree* node)
    {
        return m_lsra->isContainableMemoryOp(node);
    }

#ifdef FEATURE_HW_INTRINSICS
    // Return true if 'node' is a containable HWIntrinsic op.
    bool IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree* node, bool* supportsRegOptional);
#endif // FEATURE_HW_INTRINSICS

    static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block);

private:
    static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd);

    bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index);

    // Makes 'childNode' contained in the 'parentNode'
    void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const;

    // Checks and makes 'childNode' contained in the 'parentNode'
    bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode);

    // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode
    // can be contained.
    bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode);

    inline LIR::Range& BlockRange() const
    {
        return LIR::AsRange(m_block);
    }

    // Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister.
    // This method checks that it is so marked: in DEBUG builds it asserts if it is not, and in
    // non-DEBUG builds (asserts disabled) it sets the flag so that we don't generate bad code.
    // This ensures that the local's value is valid on-stack as expected for a *LCL_FLD.
    void verifyLclFldDoNotEnregister(unsigned lclNum)
    {
        LclVarDsc* varDsc = &(comp->lvaTable[lclNum]);
        // Do a couple of simple checks before setting lvDoNotEnregister.
        // This may not cover all cases in 'isRegCandidate()' but we don't want to
        // do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister.
        if (varDsc->lvTracked && !varDsc->lvDoNotEnregister)
        {
            assert(!m_lsra->isRegCandidate(varDsc));
            comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DoNotEnregisterReason::LocalField));
        }
    }

    LinearScan*   m_lsra;
    unsigned      vtableCallTemp;       // local variable we use as a temp for vtable calls
    SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate
    BasicBlock*   m_block;
};

#endif // _LOWER_H_