Unverified commit 41882cbf, authored by Sergey Andreenko, committed by GitHub

Support !JitDoOldStructRetyping on other platforms. (#35943)

* Add more test cases.

* Initialize `ReturnTypeDesc` when we keep struct types.

* Add a few const modifiers.

* Additional checks in `LowerRet`.

* Support `return double(cnst int)`, i.e. a double return whose source is an integer constant.

* Optimize `LowerRetStruct`: no bitcast is needed when the value is read from memory.

* Prepare `LowerNode` for store-local and store-local-field nodes with multi-reg sources.

* Compile the new methods with FEATURE_MULTIREG_RET.

* Improve `LowerRetStructLclVar`.

Don't use bitcast if the source is in memory or has the same type.

* Extract `LowerStoreLocCommon`.

* Support 3, 5, 6, and 7 byte structs in `LowerCallStruct` (see the C# sketch after this list).

Move call handling to the users of the call node.

* Disable `JitDoOldStructRetyping` for x86 and x64.

Windows x64 was supported in a previous PR; this adds x86 (Windows and Linux) and x64 Unix. A launcher sketch for flipping the switch follows this list.

* Apply review suggestions.

* Disable by default for the merge.
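For readers skimming the diff, here is a condensed C# sketch of the return shapes this change targets. It mirrors, in simplified form, the tests added in this commit; the type and method names are illustrative only, and the comments describe the expected IR behavior rather than guaranteed codegen.

```csharp
using System.Runtime.CompilerServices;
using System.Runtime.InteropServices;

static class RetypingSketch
{
    // An 8-byte struct that fits in a single return register. With the new
    // (!JitDoOldStructRetyping) path the JIT keeps the struct type instead of
    // retyping the return to a primitive during import.
    struct DoubleWrapper { public double D; }

    [MethodImpl(MethodImplOptions.NoInlining)]
    static DoubleWrapper ReturnDoubleFromConstant()
    {
        // Zero-initialization likely surfaces as an integer constant in the IR,
        // so the return must produce a double from it: `return double(cnst int)`.
        return default;
    }

    // A 3-byte struct: not a power-of-two size, so the call result cannot simply
    // be retyped to a wider primitive load without touching bytes past the struct.
    [StructLayout(LayoutKind.Sequential, Pack = 1)]
    struct Byte3 { public byte B0, B1, B2; }

    [MethodImpl(MethodImplOptions.NoInlining)]
    static Byte3 ReturnByte3() => new Byte3 { B0 = 1, B1 = 2, B2 = 3 };

    static int Main()
    {
        DoubleWrapper d = ReturnDoubleFromConstant();
        Byte3 b = ReturnByte3();
        return (d.D == 0.0 && b.B0 + b.B1 + b.B2 == 6) ? 100 : -1;
    }
}
```

The full coverage lives in the `TestHFA` and `TestNon2PowerStructs` classes added further down in this diff.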
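The retyping behavior itself stays behind the `JitDoOldStructRetyping` switch, which remains 1 (old behavior) by default per the last bullet. Below is a hedged sketch of how one might run the tests with the switch flipped: `corerun` and `StructTests.dll` are placeholder names, and the `COMPlus_` environment-variable prefix is assumed to be the usual way this JIT config integer is read.

```csharp
using System.Diagnostics;

class RunWithNewRetyping
{
    static void Main()
    {
        // Placeholder host and test names; the point is only the environment variable.
        var psi = new ProcessStartInfo("corerun", "StructTests.dll")
        {
            UseShellExecute = false
        };
        // JIT config integers are normally read via COMPlus_-prefixed variables,
        // so this opts the child process into the new, non-retyping code paths.
        psi.Environment["COMPlus_JitDoOldStructRetyping"] = "0";
        Process.Start(psi)?.WaitForExit();
    }
}
```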
Parent fa99dbad
@@ -9059,10 +9059,21 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
}
else
{
#if FEATURE_MULTIREG_RET
const ReturnTypeDesc* retTypeDesc = call->GetReturnTypeDesc();
const unsigned retRegCount = retTypeDesc->GetReturnRegCount();
assert(retRegCount != 0);
if (!compDoOldStructRetyping() && retRegCount == 1)
{
return call;
}
#else // !FEATURE_MULTIREG_RET
if (!compDoOldStructRetyping())
{
return call;
}
#endif // !FEATURE_MULTIREG_RET
assert(returnType != TYP_UNKNOWN);
// See if the struct size is smaller than the return
@@ -9095,9 +9106,6 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
}
#if FEATURE_MULTIREG_RET
const unsigned retRegCount = call->GetReturnTypeDesc()->GetReturnRegCount();
assert(retRegCount != 0);
if (retRegCount >= 2)
{
if ((!call->CanTailCall()) && (!call->IsInlineCandidate()))
@@ -9130,12 +9138,6 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
if (!compDoOldStructRetyping() && (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)))
{
// Don't retype `struct` as a primitive type in `ret` instruction.
return op;
}
JITDUMP("\nimpFixupStructReturnType: retyping\n");
DISPTREE(op);
@@ -9250,6 +9252,12 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
#endif // FEATURE_MULTIREG_RET && FEATURE_HFA
if (!compDoOldStructRetyping() && (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)))
{
// Don't retype `struct` as a primitive type in `ret` instruction.
return op;
}
REDO_RETURN_NODE:
// adjust the type away from struct to integral
// and no normalizing
@@ -15214,6 +15222,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
if (!compDoOldStructRetyping())
{
op1->AsCall()->gtRetClsHnd = classHandle;
#if FEATURE_MULTIREG_RET
op1->AsCall()->InitializeStructReturnType(this, classHandle);
#endif
}
tiRetVal = typeInfo(TI_STRUCT, classHandle);
@@ -15258,6 +15269,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
if (!compDoOldStructRetyping())
{
#if FEATURE_MULTIREG_RET
op1->AsCall()->InitializeStructReturnType(this, tokenType);
#endif
op1->AsCall()->gtRetClsHnd = tokenType;
}
@@ -437,13 +437,13 @@ CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSave
#endif // defined(TARGET_ARM64)
#endif // DEBUG
#if !FEATURE_MULTIREG_RET
#if defined(TARGET_ARMARCH)
CONFIG_INTEGER(JitDoOldStructRetyping, W("JitDoOldStructRetyping"), 1) // Allow Jit to retype structs as primitive types
// when possible.
#else // FEATURE_MULTIREG_RET
#else
CONFIG_INTEGER(JitDoOldStructRetyping, W("JitDoOldStructRetyping"), 1) // Allow Jit to retype structs as primitive types
// when possible.
#endif // FEATURE_MULTIREG_RET
#endif
#undef CONFIG_INTEGER
#undef CONFIG_STRING
This diff has been collapsed.
@@ -92,7 +92,7 @@ private:
void ContainCheckStoreIndir(GenTreeIndir* indirNode);
void ContainCheckMul(GenTreeOp* node);
void ContainCheckShiftRotate(GenTreeOp* node);
void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc);
void ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const;
void ContainCheckCast(GenTreeCast* node);
void ContainCheckCompare(GenTreeOp* node);
void ContainCheckBinary(GenTreeOp* node);
@@ -132,11 +132,14 @@ private:
GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
void LowerJmpMethod(GenTree* jmp);
void LowerRet(GenTreeUnOp* ret);
#if !FEATURE_MULTIREG_RET
void LowerStoreLocCommon(GenTreeLclVarCommon* lclVar);
void LowerRetStruct(GenTreeUnOp* ret);
void LowerRetStructLclVar(GenTreeUnOp* ret);
void LowerCallStruct(GenTreeCall* call);
#endif
void LowerStoreCallStruct(GenTreeBlk* store);
#if !defined(WINDOWS_AMD64_ABI)
GenTreeLclVar* SpillStructCallResult(GenTreeCall* call) const;
#endif // WINDOWS_AMD64_ABI
GenTree* LowerDelegateInvoke(GenTreeCall* call);
GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
GenTree* LowerDirectCall(GenTreeCall* call);
@@ -459,7 +462,7 @@ public:
// return true if 'childNode' is an immediate that can be contained
// by the 'parentNode' (i.e. folded into an instruction)
// for example small enough and non-relocatable
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode);
bool IsContainableImmed(GenTree* parentNode, GenTree* childNode) const;
// Return true if 'node' is a containable memory op.
bool IsContainableMemoryOp(GenTree* node)
@@ -478,7 +481,7 @@ private:
bool AreSourcesPossiblyModifiedLocals(GenTree* addr, GenTree* base, GenTree* index);
// Makes 'childNode' contained in the 'parentNode'
void MakeSrcContained(GenTree* parentNode, GenTree* childNode);
void MakeSrcContained(GenTree* parentNode, GenTree* childNode) const;
// Checks and makes 'childNode' contained in the 'parentNode'
bool CheckImmedAndMakeContained(GenTree* parentNode, GenTree* childNode);
@@ -52,7 +52,7 @@ bool Lowering::IsCallTargetInRange(void* addr)
// TODO-CQ: we can contain a floating point 0.0 constant in a compare instruction
// (vcmp on arm, fcmp on arm64).
//
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
{
if (!varTypeIsFloating(parentNode->TypeGet()))
{
@@ -973,7 +973,7 @@ void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc)
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const
{
assert(storeLoc->OperIsLocalStore());
GenTree* op1 = storeLoc->gtGetOp1();
@@ -2530,7 +2530,7 @@ bool Lowering::IsCallTargetInRange(void* addr)
}
// return true if the immediate can be folded into an instruction, for example small enough and non-relocatable
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode)
bool Lowering::IsContainableImmed(GenTree* parentNode, GenTree* childNode) const
{
if (!childNode->IsIntCnsFitsInI32())
{
@@ -3066,7 +3066,7 @@ void Lowering::ContainCheckShiftRotate(GenTreeOp* node)
// Arguments:
// node - pointer to the node
//
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc)
void Lowering::ContainCheckStoreLoc(GenTreeLclVarCommon* storeLoc) const
{
assert(storeLoc->OperIsLocalStore());
GenTree* op1 = storeLoc->gtGetOp1();
@@ -927,6 +927,406 @@ public static void Test()
}
#endregion
class TestHFA
{
[MethodImpl(MethodImplOptions.NoInlining)]
static float ReturnFloat()
{
return 1;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static double ReturnDouble()
{
return 1;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector2 ReturnVector2()
{
return new Vector2(1);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector3 ReturnVector3()
{
return new Vector3(1);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector4 ReturnVector4()
{
return new Vector4(1);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector4 ReturnVector4UsingCall()
{
return ReturnVector4();
}
struct FloatWrapper
{
public float f;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static FloatWrapper ReturnFloatWrapper()
{
return new FloatWrapper();
}
struct DoubleWrapper
{
public double f;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static DoubleWrapper ReturnDoubleWrapper()
{
return new DoubleWrapper();
}
struct Floats2Wrapper
{
public float f1;
public float f2;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Floats2Wrapper ReturnFloats2Wrapper()
{
return new Floats2Wrapper();
}
struct Doubles2Wrapper
{
public double f1;
public double f2;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Doubles2Wrapper ReturnDoubles2Wrapper()
{
return new Doubles2Wrapper();
}
struct Floats3Wrapper
{
public float f1;
public float f2;
public float f3;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Floats3Wrapper ReturnFloats3Wrapper()
{
return new Floats3Wrapper();
}
struct Doubles3Wrapper
{
public double f1;
public double f2;
public double f3;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Doubles3Wrapper ReturnDoubles3Wrapper()
{
return new Doubles3Wrapper();
}
struct Floats4Wrapper
{
public float f1;
public float f2;
public float f3;
public float f4;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Floats4Wrapper ReturnFloats4Wrapper()
{
return new Floats4Wrapper();
}
struct Doubles4Wrapper
{
public double f1;
public double f2;
public double f3;
public double f4;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Doubles4Wrapper ReturnDoubles4Wrapper()
{
return new Doubles4Wrapper();
}
struct Vector2Wrapper
{
Vector2 f1;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector2Wrapper ReturnVector2Wrapper()
{
return new Vector2Wrapper();
}
struct Vector3Wrapper
{
Vector3 f1;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector3Wrapper ReturnVector3Wrapper()
{
return new Vector3Wrapper();
}
struct Vector4Wrapper
{
Vector4 f1;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector4Wrapper ReturnVector4Wrapper()
{
return new Vector4Wrapper();
}
struct Vector2x2Wrapper
{
Vector2 f1;
Vector2 f2;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Vector2x2Wrapper ReturnVector2x2Wrapper()
{
return new Vector2x2Wrapper();
}
[MethodImpl(MethodImplOptions.NoInlining)]
public static void Test()
{
ReturnFloat();
ReturnDouble();
ReturnVector2();
ReturnVector3();
ReturnVector4();
ReturnVector4UsingCall();
ReturnFloatWrapper();
ReturnDoubleWrapper();
ReturnFloats2Wrapper();
ReturnDoubles2Wrapper();
ReturnFloats3Wrapper();
ReturnDoubles3Wrapper();
ReturnFloats4Wrapper();
ReturnDoubles4Wrapper();
ReturnVector2Wrapper();
ReturnVector3Wrapper();
ReturnVector4Wrapper();
ReturnVector2x2Wrapper();
}
}
class TestNon2PowerStructs
{
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct Byte3Struct
{
public byte f1;
public byte f2;
public byte f3;
[MethodImpl(MethodImplOptions.NoInlining)]
public Byte3Struct(int v)
{
f1 = 1;
f2 = 2;
f3 = 3;
}
}
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct Byte5Struct
{
public byte f1;
public byte f2;
public byte f3;
public byte f4;
public byte f5;
[MethodImpl(MethodImplOptions.NoInlining)]
public Byte5Struct(int v)
{
f1 = 4;
f2 = 5;
f3 = 6;
f4 = 7;
f5 = 8;
}
}
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct Byte6Struct
{
public byte f1;
public byte f2;
public byte f3;
public byte f4;
public byte f5;
public byte f6;
[MethodImpl(MethodImplOptions.NoInlining)]
public Byte6Struct(int v)
{
f1 = 9;
f2 = 10;
f3 = 11;
f4 = 12;
f5 = 13;
f6 = 14;
}
}
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct Byte7Struct
{
public byte f1;
public byte f2;
public byte f3;
public byte f4;
public byte f5;
public byte f6;
public byte f7;
[MethodImpl(MethodImplOptions.NoInlining)]
public Byte7Struct(int v)
{
f1 = 15;
f2 = 16;
f3 = 17;
f4 = 18;
f5 = 19;
f6 = 20;
f7 = 21;
}
}
[StructLayout(LayoutKind.Sequential, Pack = 1)]
struct CompositeOfOddStructs
{
public Byte3Struct a;
public Byte5Struct b;
public Byte6Struct c;
public Byte7Struct d;
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Byte3Struct Return3()
{
return new Byte3Struct(0);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Byte5Struct Return5()
{
return new Byte5Struct(0);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Byte6Struct Return6()
{
return new Byte6Struct(0);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static Byte7Struct Return7()
{
return new Byte7Struct(0);
}
[MethodImpl(MethodImplOptions.NoInlining)]
static CompositeOfOddStructs CreateComposite()
{
CompositeOfOddStructs c = new CompositeOfOddStructs();
c.a = Return3();
c.b = Return5();
c.c = Return6();
c.d = Return7();
return c;
}
[MethodImpl(MethodImplOptions.NoInlining)]
public static void TestComposite()
{
var c = CreateComposite();
Debug.Assert(c.a.f1 == 1);
Debug.Assert(c.a.f2 == 2);
Debug.Assert(c.a.f3 == 3);
Debug.Assert(c.b.f1 == 4);
Debug.Assert(c.b.f2 == 5);
Debug.Assert(c.b.f3 == 6);
Debug.Assert(c.b.f4 == 7);
Debug.Assert(c.b.f5 == 8);
Debug.Assert(c.c.f1 == 9);
Debug.Assert(c.c.f2 == 10);
Debug.Assert(c.c.f3 == 11);
Debug.Assert(c.c.f4 == 12);
Debug.Assert(c.c.f5 == 13);
Debug.Assert(c.c.f6 == 14);
Debug.Assert(c.d.f1 == 15);
Debug.Assert(c.d.f2 == 16);
Debug.Assert(c.d.f3 == 17);
Debug.Assert(c.d.f4 == 18);
Debug.Assert(c.d.f5 == 19);
Debug.Assert(c.d.f6 == 20);
Debug.Assert(c.d.f7 == 21);
}
[MethodImpl(MethodImplOptions.NoInlining)]
public static byte TestLocals(int v)
{
var a = Return3();
var a1 = a;
a1.f1 = 0;
var b = Return5();
var c = Return6();
var d = Return7();
if (v == 0)
{
return a.f1;
}
else if (v == 1)
{
return b.f1;
}
else if (v == 3)
{
return c.f1;
}
else if (v == 4)
{
return d.f1;
}
else
{
return a1.f1;
}
}
[MethodImpl(MethodImplOptions.NoInlining)]
public static void Test()
{
TestComposite();
TestLocals(0);
}
}
class TestStructs
{
public static int Main()
@@ -934,6 +1334,8 @@ public static int Main()
TestStructReturns.Test();
TestUnsafeCasts.Test();
TestMergeReturnBlocks.Test();
TestHFA.Test();
TestNon2PowerStructs.Test();
return 100;
}
}