From 11a6149bafe57218874fc37b8c412eb5bedf013a Mon Sep 17 00:00:00 2001
From: limingshu <61349199+JamesLim-sy@users.noreply.github.com>
Date: Mon, 6 Mar 2023 19:57:25 +0800
Subject: [PATCH] [BugFix] Fix error attributes setting in FusedLinear unitest
 (#50359)

* first commit.

* change host logic

* fix code bugs

* fix code error

---------

Co-authored-by: zhangbopd <1299246947@qq.com>
---
 .../tests/unittests/test_fused_gemm_epilogue_grad_op.py   | 8 ++++----
 .../unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py
index 21b6d7e29cf..f6275cb34ec 100644
--- a/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fused_gemm_epilogue_grad_op.py
@@ -46,7 +46,7 @@ class TestFuseGemmEpilogueGradOpDXYBiasFP16(OpTest):
             'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
         }
 
-        self.attrs = {"activation": 'none'}
+        self.attrs = {"activation_grad": 'none'}
 
         DX, DY, DBias = get_outputs(
             self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
@@ -105,7 +105,7 @@ class TestFuseGemmEpilogueGradOpDYBiasFP16(OpTest):
             'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
         }
 
-        self.attrs = {"activation": 'none'}
+        self.attrs = {"activation_grad": 'none'}
 
         _, DY, DBias = get_outputs(
             self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
@@ -164,7 +164,7 @@ class TestFuseGemmEpilogueGradOpDYFP16(OpTest):
             'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
         }
 
-        self.attrs = {"activation": 'none'}
+        self.attrs = {"activation_grad": 'none'}
 
         _, DY, _ = get_outputs(
             self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
@@ -219,7 +219,7 @@ class TestFuseGemmEpilogueGradOpDXYFP16(OpTest):
             'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
         }
 
-        self.attrs = {"activation": 'none'}
+        self.attrs = {"activation_grad": 'none'}
 
         DX, DY, _ = get_outputs(
             self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py
index 01b265237c5..ea0143d802d 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_grad_op_xpu.py
@@ -60,7 +60,7 @@ class XPUTestFuseGemmGradOp(XPUOpTestWrapper):
                 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
             }
 
-            self.attrs = {"activation": 'none'}
+            self.attrs = {"activation_grad": 'none'}
 
             DX, DY, DBias = get_outputs(
                 self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
@@ -81,7 +81,7 @@ class XPUTestFuseGemmGradOp(XPUOpTestWrapper):
                 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
             }
 
-            self.attrs = {"activation": 'none'}
+            self.attrs = {"activation_grad": 'none'}
 
             _, DY, DBias = get_outputs(
                 self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
@@ -96,7 +96,7 @@ class XPUTestFuseGemmGradOp(XPUOpTestWrapper):
                 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
             }
 
-            self.attrs = {"activation": 'none'}
+            self.attrs = {"activation_grad": 'none'}
 
             _, DY, _ = get_outputs(
                 self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
@@ -111,7 +111,7 @@ class XPUTestFuseGemmGradOp(XPUOpTestWrapper):
                 'Y': np.random.random((4, 128)).astype(self.dtype) - 0.5,
             }
 
-            self.attrs = {"activation": 'none'}
+            self.attrs = {"activation_grad": 'none'}
 
             DX, DY, _ = get_outputs(
                 self.inputs['DOut'], self.inputs['X'], self.inputs['Y']
--
GitLab