// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.

/*XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XX                                                                           XX
XX                               Lower                                       XX
XX                                                                           XX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
*/

#ifndef _LOWER_H_
#define _LOWER_H_

#include "compiler.h"
#include "phase.h"
#include "lsra.h"
#include "sideeffects.h"

class Lowering final : public Phase
{
public:
    inline Lowering(Compiler* compiler, LinearScanInterface* lsra)
        : Phase(compiler, PHASE_LOWERING), vtableCallTemp(BAD_VAR_NUM)
    {
        m_lsra = (LinearScan*)lsra;
        assert(m_lsra);
    }
    virtual PhaseStatus DoPhase() override;

    // This variant of LowerRange is called from outside of the main Lowering pass,
    // so it creates its own instance of Lowering to do so.
    void LowerRange(BasicBlock* block, LIR::ReadOnlyRange& range)
    {
        Lowering lowerer(comp, m_lsra);
        lowerer.m_block = block;

        lowerer.LowerRange(range);
    }
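
    // Illustrative usage only (not part of this header; the names are hypothetical): a
    // later phase that has inserted new LIR nodes into 'block' could lower just that
    // range without re-running the whole Lowering phase, e.g.
    //
    //     LIR::ReadOnlyRange newNodes(firstNewNode, lastNewNode); // hypothetical bounds
    //     lowering->LowerRange(block, newNodes);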

private:
    // LowerRange handles new code that is introduced by or after Lowering.
    void LowerRange(LIR::ReadOnlyRange& range)
    {
        for (GenTree* newNode : range)
        {
            LowerNode(newNode);
        }
    }
    void LowerRange(GenTree* firstNode, GenTree* lastNode)
    {
        LIR::ReadOnlyRange range(firstNode, lastNode);
        LowerRange(range);
    }

    // ContainCheckRange handles new code that is introduced by or after Lowering,
    // and that is known to be already in Lowered form.
    void ContainCheckRange(LIR::ReadOnlyRange& range)
    {
        for (GenTree* newNode : range)
        {
            ContainCheckNode(newNode);
        }
    }
    void ContainCheckRange(GenTree* firstNode, GenTree* lastNode)
    {
        LIR::ReadOnlyRange range(firstNode, lastNode);
        ContainCheckRange(range);
    }

    void InsertTreeBeforeAndContainCheck(GenTree* insertionPoint, GenTree* tree)
    {
        LIR::Range range = LIR::SeqTree(comp, tree);
        ContainCheckRange(range);
        BlockRange().InsertBefore(insertionPoint, std::move(range));
    }

    void ContainCheckNode(GenTree* node);

    void ContainCheckDivOrMod(GenTreeOp* node);
    void ContainCheckReturnTrap(GenTreeOp* node);
    void ContainCheckArrOffset(GenTreeArrOffs* node);
    void ContainCheckLclHeap(GenTreeOp* node);
    void ContainCheckRet(GenTreeUnOp* ret);
    void ContainCheckJTrue(GenTreeOp* node);

    void ContainCheckBitCast(GenTree* node);
    void ContainCheckCallOperands(GenTreeCall* call);
    void ContainCheckIndir(GenTreeIndir* indirNode);
    void ContainCheckStoreIndir(GenTreeIndir* indirNode);
    void ContainCheckMul(GenTreeOp* node);
    void ContainCheckShiftRotate(GenTreeOp* node);
    void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const;
    void ContainCheckCast(GenTreeCast* node);
    void ContainCheckCompare(GenTreeOp* node);
    void ContainCheckBinary(GenTreeOp* node);
    void ContainCheckBoundsChk(GenTreeBoundsChk* node);
#ifdef TARGET_XARCH
    void ContainCheckFloatBinary(GenTreeOp* node);
    void ContainCheckIntrinsic(GenTreeOp* node);
#endif // TARGET_XARCH
#ifdef FEATURE_SIMD
    void ContainCheckSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    void ContainCheckHWIntrinsicAddr(GenTreeHWIntrinsic* node, GenTree* addr);
    void ContainCheckHWIntrinsic(GenTreeHWIntrinsic* node);
#endif // FEATURE_HW_INTRINSICS

#ifdef DEBUG
    static void CheckCallArg(GenTree* arg);
    static void CheckCall(GenTreeCall* call);
    static void CheckNode(Compiler* compiler, GenTree* node);
    static bool CheckBlock(Compiler* compiler, BasicBlock* block);
#endif // DEBUG

    void LowerBlock(BasicBlock* block);
    GenTree* LowerNode(GenTree* node);

    // ------------------------------
    // Call Lowering
    // ------------------------------
    void LowerCall(GenTree* call);
#ifndef TARGET_64BIT
    GenTree* DecomposeLongCompare(GenTree* cmp);
#endif
    GenTree* OptimizeConstCompare(GenTree* cmp);
    GenTree* LowerCompare(GenTree* cmp);
    GenTree* LowerJTrue(GenTreeOp* jtrue);
    GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
    void LowerJmpMethod(GenTree* jmp);
    void LowerRet(GenTreeUnOp* ret);
    void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
    void LowerRetStruct(GenTreeUnOp* ret);
    void LowerRetSingleRegStructLclVar(GenTreeUnOp* ret);
    void LowerCallStruct(GenTreeCall* call);
    void LowerStoreSingleRegCallStruct(GenTreeBlk* store);
#if !defined(WINDOWS_AMD64_ABI)
    GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const;
#endif // WINDOWS_AMD64_ABI
    GenTree* LowerDelegateInvoke(GenTreeCall* call);
    GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
    GenTree* LowerDirectCall(GenTreeCall* call);
    GenTree* LowerNonvirtPinvokeCall(GenTreeCall* call);
    GenTree* LowerTailCallViaJitHelper(GenTreeCall* callNode, GenTree* callTarget);
    void LowerFastTailCall(GenTreeCall* callNode);
    void RehomeArgForFastTailCall(unsigned int lclNum,
                                  GenTree*     insertTempBefore,
                                  GenTree*     lookForUsesStart,
                                  GenTreeCall* callNode);
    void InsertProfTailCallHook(GenTreeCall* callNode, GenTree* insertionPoint);
    GenTree* LowerVirtualVtableCall(GenTreeCall* call);
    GenTree* LowerVirtualStubCall(GenTreeCall* call);
    void LowerArgsForCall(GenTreeCall* call);
    void ReplaceArgWithPutArgOrBitcast(GenTree** ppChild, GenTree* newNode);
    GenTree* NewPutArg(GenTreeCall* call, GenTree* arg, fgArgTabEntry* info, var_types type);
    void LowerArg(GenTreeCall* call, GenTree** ppTree);
#ifdef TARGET_ARMARCH
    GenTree* LowerFloatArg(GenTree** pArg, fgArgTabEntry* info);
    GenTree* LowerFloatArgReg(GenTree* arg, regNumber regNum);
#endif

    void InsertPInvokeCallProlog(GenTreeCall* call);
    void InsertPInvokeCallEpilog(GenTreeCall* call);
    void InsertPInvokeMethodProlog();
    void InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* lastExpr));
    GenTree* SetGCState(int cns);
    GenTree* CreateReturnTrapSeq();
    enum FrameLinkAction
    {
        PushFrame,
        PopFrame
    };
    GenTree* CreateFrameLinkUpdate(FrameLinkAction);
    GenTree* AddrGen(ssize_t addr);
    GenTree* AddrGen(void* addr);

    GenTree* Ind(GenTree* tree, var_types type = TYP_I_IMPL)
    {
        return comp->gtNewOperNode(GT_IND, type, tree);
    }

    GenTree* PhysReg(regNumber reg, var_types type = TYP_I_IMPL)
    {
        return comp->gtNewPhysRegNode(reg, type);
    }

    GenTree* ThisReg(GenTreeCall* call)
    {
        return PhysReg(comp->codeGen->genGetThisArgReg(call), TYP_REF);
    }

    GenTree* Offset(GenTree* base, unsigned offset)
    {
        var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
        return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
    }

    GenTree* OffsetByIndex(GenTree* base, GenTree* index)
    {
        var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
        return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0);
    }

    GenTree* OffsetByIndexWithScale(GenTree* base, GenTree* index, unsigned scale)
    {
        var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
        return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, scale, 0);
    }

    // Replace the definition of the given use with a lclVar, allocating a new temp
    // if 'tempNum' is BAD_VAR_NUM. Returns the LclVar node.
    GenTreeLclVar* ReplaceWithLclVar(LIR::Use& use, unsigned tempNum = BAD_VAR_NUM)
    {
        GenTree* oldUseNode = use.Def();
        if ((oldUseNode->gtOper != GT_LCL_VAR) || (tempNum != BAD_VAR_NUM))
        {
            use.ReplaceWithLclVar(comp, tempNum);
            GenTree* newUseNode = use.Def();
            ContainCheckRange(oldUseNode->gtNext, newUseNode);
            return newUseNode->AsLclVar();
        }
        return oldUseNode->AsLclVar();
    }
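
    // Illustrative usage only (not part of this header; 'node' is hypothetical): a typical
    // caller captures a use of an operand and forces its definition into a lclVar, e.g.
    //
    //     LIR::Use opUse(BlockRange(), &node->AsOp()->gtOp1, node);
    //     GenTreeLclVar* lclVar = ReplaceWithLclVar(opUse);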

    // return true if this call target is within range of a pc-rel call on the machine
    bool IsCallTargetInRange(void* addr);

#if defined(TARGET_XARCH)
    GenTree* PreferredRegOptionalOperand(GenTree* tree);

    // ------------------------------------------------------------------
    // SetRegOptionalForBinOp - Indicates which operands of a bin-op have an optional
    // register requirement. The xarch instruction set allows either op1 or op2 of a
    // binary operation (e.g. add, mul, etc.) to be a memory operand. This routine tells
    // the register allocator which of the operands only optionally require a register.
    // LSRA might not allocate a register to the RefTypeUse positions of such operands
    // if it is beneficial; in that case codegen will treat them as memory operands.
    //
    // Arguments:
    //     tree  -             Gentree of a binary operation.
    //     isSafeToMarkOp1     True if it's safe to mark op1 as register optional
    //     isSafeToMarkOp2     True if it's safe to mark op2 as register optional
    //
    // Returns:
    //     None.
    //
    // Notes:
    //     The caller is expected to get isSafeToMarkOp1 and isSafeToMarkOp2
    //     by calling IsSafeToContainMem.
    //
    //     On xarch, at most one of the operands will be marked as reg optional,
    //     even when both operands could be considered register optional.
    void SetRegOptionalForBinOp(GenTree* tree, bool isSafeToMarkOp1, bool isSafeToMarkOp2)
    {
        assert(GenTree::OperIsBinary(tree->OperGet()));

        GenTree* const op1 = tree->gtGetOp1();
        GenTree* const op2 = tree->gtGetOp2();

        const unsigned operatorSize = genTypeSize(tree->TypeGet());

        const bool op1Legal =
            isSafeToMarkOp1 && tree->OperIsCommutative() && (operatorSize == genTypeSize(op1->TypeGet()));
        const bool op2Legal = isSafeToMarkOp2 && (operatorSize == genTypeSize(op2->TypeGet()));

        GenTree* regOptionalOperand = nullptr;
        if (op1Legal)
        {
            regOptionalOperand = op2Legal ? PreferredRegOptionalOperand(tree) : op1;
        }
        else if (op2Legal)
        {
            regOptionalOperand = op2;
        }
        if (regOptionalOperand != nullptr)
        {
            regOptionalOperand->SetRegOptional();
        }
    }
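
    // Illustrative usage only (not part of this header; 'node' is hypothetical): a caller
    // that could not contain either operand as a memory operand typically computes the
    // safety of each operand and lets SetRegOptionalForBinOp pick, e.g.
    //
    //     const bool op1Safe = IsSafeToContainMem(node, node->gtGetOp1());
    //     const bool op2Safe = IsSafeToContainMem(node, node->gtGetOp2());
    //     SetRegOptionalForBinOp(node, op1Safe, op2Safe);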
#endif // defined(TARGET_XARCH)

    // Per tree node member functions
    void LowerStoreIndirCommon(GenTreeIndir* ind);
    void LowerIndir(GenTreeIndir* ind);
    void LowerStoreIndir(GenTreeIndir* node);
    GenTree* LowerAdd(GenTreeOp* node);
    bool LowerUnsignedDivOrMod(GenTreeOp* divMod);
    GenTree* LowerConstIntDivOrMod(GenTree* node);
    GenTree* LowerSignedDivOrMod(GenTree* node);
    void LowerBlockStore(GenTreeBlk* blkNode);
    void LowerBlockStoreCommon(GenTreeBlk* blkNode);
    void ContainBlockStoreAddress(GenTreeBlk* blkNode, unsigned size, GenTree* addr);
    void LowerPutArgStk(GenTreePutArgStk* tree);

    bool TryCreateAddrMode(GenTree* addr, bool isContainable);

    bool TryTransformStoreObjAsStoreInd(GenTreeBlk* blkNode);

    GenTree* LowerSwitch(GenTree* node);
    bool TryLowerSwitchToBitTest(
        BasicBlock* jumpTable[], unsigned jumpCount, unsigned targetCount, BasicBlock* bbSwitch, GenTree* switchValue);

    void LowerCast(GenTree* node);

#if !CPU_LOAD_STORE_ARCH
    bool IsRMWIndirCandidate(GenTree* operand, GenTree* storeInd);
    bool IsBinOpInRMWStoreInd(GenTree* tree);
    bool IsRMWMemOpRootedAtStoreInd(GenTree* storeIndTree, GenTree** indirCandidate, GenTree** indirOpSource);
    bool LowerRMWMemOp(GenTreeIndir* storeInd);
#endif

    void WidenSIMD12IfNecessary(GenTreeLclVarCommon* node);
    bool CheckMultiRegLclVar(GenTreeLclVar* lclNode, const ReturnTypeDesc* retTypeDesc);
    void LowerStoreLoc(GenTreeLclVarCommon* tree);
    GenTree* LowerArrElem(GenTree* node);
    void LowerRotate(GenTree* tree);
    void LowerShift(GenTreeOp* shift);
#ifdef FEATURE_SIMD
    void LowerSIMD(GenTreeSIMD* simdNode);
#endif // FEATURE_SIMD
#ifdef FEATURE_HW_INTRINSICS
    void LowerHWIntrinsic(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicCC(GenTreeHWIntrinsic* node, NamedIntrinsic newIntrinsicId, GenCondition condition);
    void LowerHWIntrinsicCmpOp(GenTreeHWIntrinsic* node, genTreeOps cmpOp);
    void LowerHWIntrinsicCreate(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicDot(GenTreeHWIntrinsic* node);
#if defined(TARGET_XARCH)
    void LowerFusedMultiplyAdd(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicToScalar(GenTreeHWIntrinsic* node);
#elif defined(TARGET_ARM64)
    bool IsValidConstForMovImm(GenTreeHWIntrinsic* node);
    void LowerHWIntrinsicFusedMultiplyAddScalar(GenTreeHWIntrinsic* node);
#endif // !TARGET_XARCH && !TARGET_ARM64

    union VectorConstant {
        int8_t   i8[32];
        uint8_t  u8[32];
        int16_t  i16[16];
        uint16_t u16[16];
        int32_t  i32[8];
        uint32_t u32[8];
        int64_t  i64[4];
        uint64_t u64[4];
        float    f32[8];
        double   f64[4];
    };

    //----------------------------------------------------------------------------------------------
    // HandleArgForHWIntrinsicCreate: Processes an argument for the Lowering::LowerHWIntrinsicCreate method
    //
    //  Arguments:
    //     arg      - The argument to process
    //     argIdx   - The index of the argument being processed
    //     vecCns   - The vector constant being constructed
    //     baseType - The base type of the vector constant
    //
    //  Returns:
    //     true if arg was a constant; otherwise, false
    static bool HandleArgForHWIntrinsicCreate(GenTree* arg, int argIdx, VectorConstant& vecCns, var_types baseType)
    {
        switch (baseType)
        {
            case TYP_BYTE:
            case TYP_UBYTE:
            {
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i8[argIdx] = static_cast<int8_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i8[argIdx] == 0);
                }
                break;
            }

            case TYP_SHORT:
            case TYP_USHORT:
            {
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i16[argIdx] = static_cast<int16_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i16[argIdx] == 0);
                }
                break;
            }

            case TYP_INT:
            case TYP_UINT:
            {
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i32[argIdx] = static_cast<int32_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i32[argIdx] == 0);
                }
                break;
            }

            case TYP_LONG:
            case TYP_ULONG:
            {
#if defined(TARGET_64BIT)
                if (arg->IsCnsIntOrI())
                {
                    vecCns.i64[argIdx] = static_cast<int64_t>(arg->AsIntCon()->gtIconVal);
                    return true;
                }
#else
                if (arg->OperIsLong() && arg->AsOp()->gtOp1->IsCnsIntOrI() && arg->AsOp()->gtOp2->IsCnsIntOrI())
                {
                    // 32-bit targets will decompose GT_CNS_LNG into two GT_CNS_INT
                    // We need to reconstruct the 64-bit value in order to handle this

                    INT64 gtLconVal = arg->AsOp()->gtOp2->AsIntCon()->gtIconVal;
                    gtLconVal <<= 32;
                    gtLconVal |= arg->AsOp()->gtOp1->AsIntCon()->gtIconVal;

                    vecCns.i64[argIdx] = gtLconVal;
                    return true;
                }
#endif // TARGET_64BIT
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    assert(vecCns.i64[argIdx] == 0);
                }
                break;
            }

            case TYP_FLOAT:
            {
                if (arg->IsCnsFltOrDbl())
                {
                    vecCns.f32[argIdx] = static_cast<float>(arg->AsDblCon()->gtDconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    // We check against the i32, rather than f32, to account for -0.0
                    assert(vecCns.i32[argIdx] == 0);
                }
                break;
            }

            case TYP_DOUBLE:
            {
                if (arg->IsCnsFltOrDbl())
                {
                    vecCns.f64[argIdx] = static_cast<double>(arg->AsDblCon()->gtDconVal);
                    return true;
                }
                else
                {
                    // We expect the VectorConstant to have been already zeroed
                    // We check against the i64, rather than f64, to account for -0.0
                    assert(vecCns.i64[argIdx] == 0);
                }
                break;
            }

            default:
            {
                unreached();
            }
        }

        return false;
    }
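
    // Illustrative usage only (not part of this header; 'args', 'argCount' and 'baseType'
    // are hypothetical): a LowerHWIntrinsicCreate-style caller walks the Create node's
    // operands, accumulating constants into a zeroed VectorConstant and noting whether
    // every argument was a constant, e.g.
    //
    //     VectorConstant vecCns = {};
    //     bool allArgsAreConst = true;
    //     for (int argIdx = 0; argIdx < argCount; argIdx++)
    //     {
    //         allArgsAreConst &= HandleArgForHWIntrinsicCreate(args[argIdx], argIdx, vecCns, baseType);
    //     }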
#endif // FEATURE_HW_INTRINSICS

    //----------------------------------------------------------------------------------------------
    // TryRemoveCastIfPresent: Removes op if it is a cast operation and the size of its input is at
    //                         least the size of expectedType
    //
    //  Arguments:
    //     expectedType - The expected type of the cast operation input if it is to be removed
    //     op           - The tree to remove if it is a cast op whose input is at least the size of expectedType
    //
    //  Returns:
    //     op if it was not a cast node or if its input is not at least the size of expectedType;
    //     otherwise, the underlying operation that was being cast
    GenTree* TryRemoveCastIfPresent(var_types expectedType, GenTree* op)
    {
        if (!op->OperIs(GT_CAST))
        {
            return op;
        }

        GenTree* castOp = op->AsCast()->CastOp();

        if (genTypeSize(castOp->gtType) >= genTypeSize(expectedType))
        {
            BlockRange().Remove(op);
            return castOp;
        }

        return op;
    }
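
    // Illustrative usage only (not part of this header; 'simdBaseType' and 'op1' are
    // hypothetical): a caller that only needs the low 'simdBaseType'-sized bits of an
    // operand can strip a widening cast before consuming it, e.g.
    //
    //     op1 = TryRemoveCastIfPresent(simdBaseType, op1);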

    // Utility functions
public:
    static bool IndirsAreEquivalent(GenTree* pTreeA, GenTree* pTreeB);

    // return true if 'childNode' is an immediate that can be contained
    //  by the 'parentNode' (i.e. folded into an instruction)
    //  for example small enough and non-relocatable
    bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const;

    // Return true if 'node' is a containable memory op.
    bool IsContainableMemoryOp(GenTree* node)
    {
        return m_lsra->isContainableMemoryOp(node);
    }

#ifdef FEATURE_HW_INTRINSICS
    // Return true if 'node' is a containable HWIntrinsic op.
    bool IsContainableHWIntrinsicOp(GenTreeHWIntrinsic* containingNode, GenTree* node, bool* supportsRegOptional);
#endif // FEATURE_HW_INTRINSICS

    static void TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, BasicBlock* block);

private:
    static bool NodesAreEquivalentLeaves(GenTree* candidate, GenTree* storeInd);

    bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index);

    // Makes 'childNode' contained in the 'parentNode'
    void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const;

    // Checks and makes 'childNode' contained in the 'parentNode'
    bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode);

    // Checks for memory conflicts in the instructions between childNode and parentNode, and returns true if childNode
    // can be contained.
    bool IsSafeToContainMem(GenTree* parentNode, GenTree* childNode);

    inline LIR::Range& BlockRange() const
    {
        return LIR::AsRange(m_block);
    }

    // Any tracked lclVar accessed by a LCL_FLD or STORE_LCL_FLD should be marked doNotEnregister.
    // This method checks, and in the DEBUG case asserts, that it is so marked; in the non-DEBUG
    // case (asserts disabled) it sets the flag so that we don't generate bad code.
    // This ensures that the local's value is valid on-stack as expected for a *LCL_FLD.
    void verifyLclFldDoNotEnregister(unsigned lclNum)
    {
        LclVarDsc* varDsc = &(comp->lvaTable[lclNum]);
        // Do a couple of simple checks before setting lvDoNotEnregister.
        // This may not cover all cases in 'isRegCandidate()' but we don't want to
        // do an expensive check here. For non-candidates it is not harmful to set lvDoNotEnregister.
        if (varDsc->lvTracked && !varDsc->lvDoNotEnregister)
        {
            assert(!m_lsra->isRegCandidate(varDsc));
            comp->lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(Compiler::DNER_LocalField));
        }
    }

    LinearScan*   m_lsra;
    unsigned      vtableCallTemp;       // local variable we use as a temp for vtable calls
    SideEffectSet m_scratchSideEffects; // SideEffectSet used for IsSafeToContainMem and isRMWIndirCandidate
    BasicBlock*   m_block;
};

#endif // _LOWER_H_