Unverified commit 6febe5fe authored by Yuang Liu, committed by GitHub

rename the fuse op, test=allcase (#34120)

Parent 14fd6cfb
@@ -10,7 +10,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/softmax_mask_fuse_upper_triangle_op.h"
#include "paddle/fluid/operators/fused_softmax_mask_upper_triangle_op.h"
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
namespace paddle {
@@ -82,7 +82,7 @@ class SoftmaxMaskFuseUpperTriangleGradOpMaker
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("softmax_mask_fuse_upper_triangle_grad");
op->SetType("fused_softmax_mask_upper_triangle_grad");
op->SetInput("Softmax", this->Output("Out"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
@@ -94,13 +94,13 @@ class SoftmaxMaskFuseUpperTriangleGradOpMaker
namespace ops = paddle::operators;
REGISTER_OPERATOR(
-softmax_mask_fuse_upper_triangle, ops::SoftmaxMaskFuseUpperTriangleOp,
+fused_softmax_mask_upper_triangle, ops::SoftmaxMaskFuseUpperTriangleOp,
ops::SoftmaxMaskFuseUpperTriangleOpMaker,
ops::SoftmaxMaskFuseUpperTriangleGradOpMaker<paddle::framework::OpDesc>,
ops::SoftmaxMaskFuseUpperTriangleGradOpMaker<paddle::imperative::OpBase>);
-REGISTER_OPERATOR(softmax_mask_fuse_upper_triangle_grad,
+REGISTER_OPERATOR(fused_softmax_mask_upper_triangle_grad,
ops::SoftmaxMaskFuseUpperTriangleOpGrad);
-REGISTER_OP_CPU_KERNEL(softmax_mask_fuse_upper_triangle,
+REGISTER_OP_CPU_KERNEL(fused_softmax_mask_upper_triangle,
ops::SoftmaxMaskFuseUpperTriangleCPUKernel<
paddle::platform::CPUDeviceContext, float>,
ops::SoftmaxMaskFuseUpperTriangleCPUKernel<
......
@@ -31,7 +31,7 @@ limitations under the License. */
#include "paddle/fluid/framework/generator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memcpy.h"
#include "paddle/fluid/operators/softmax_mask_fuse_upper_triangle_op.h"
#include "paddle/fluid/operators/fused_softmax_mask_upper_triangle_op.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
@@ -534,12 +534,12 @@ class SoftmaxMaskFuseUpperTriangleGradKernel : public framework::OpKernel<T> {
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
-softmax_mask_fuse_upper_triangle,
+fused_softmax_mask_upper_triangle,
ops::SoftmaxMaskFuseUpperTriangleKernel<plat::CUDADeviceContext,
plat::float16>,
ops::SoftmaxMaskFuseUpperTriangleKernel<plat::CUDADeviceContext, float>);
REGISTER_OP_CUDA_KERNEL(
-softmax_mask_fuse_upper_triangle_grad,
+fused_softmax_mask_upper_triangle_grad,
ops::SoftmaxMaskFuseUpperTriangleGradKernel<plat::CUDADeviceContext,
plat::float16>,
ops::SoftmaxMaskFuseUpperTriangleGradKernel<plat::CUDADeviceContext,
......
@@ -42,7 +42,7 @@ def _get_softmax_upper(x, fp16=True):
"core is not compiled with CUDA")
class TestSoftmaxMaskFuseOp(OpTest):
def setUp(self):
self.op_type = "softmax_mask_fuse_upper_triangle"
self.op_type = "fused_softmax_mask_upper_triangle"
x = np.random.random((1, 1, 32, 32)).astype("float16")
self.inputs = {'X': x}
rst = _get_softmax_upper(x)
@@ -59,7 +59,7 @@ class TestSoftmaxMaskFuseOp(OpTest):
"core is not compiled with CUDA")
class TestSoftmaxMaskFuseOp1(OpTest):
def setUp(self):
self.op_type = "softmax_mask_fuse_upper_triangle"
self.op_type = "fused_softmax_mask_upper_triangle"
x = np.random.random((1, 1, 32, 32))
self.inputs = {'X': x}
rst = _get_softmax_upper(x)
......
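The test hunks above compare the op against the NumPy baseline `_get_softmax_upper`, whose body is collapsed in this diff. A minimal sketch of such a baseline, assuming the op fills the strictly upper-triangular part of the last two dimensions with a large negative value before a row-wise softmax (the helper name and the -10000 constant are illustrative, not taken from the diff):

```python
import numpy as np

def _softmax_upper_ref(x, fp16=True):
    # Hypothetical reference: mask the strictly upper triangle of the last
    # two dims with a large negative value, then softmax over the last axis.
    seq_len = x.shape[-1]
    mask = np.triu(np.ones((seq_len, seq_len), dtype=bool), k=1)
    masked = np.where(mask, -10000.0, x.astype("float32"))
    shifted = masked - masked.max(axis=-1, keepdims=True)  # numerical stability
    exp = np.exp(shifted)
    out = exp / exp.sum(axis=-1, keepdims=True)
    return out.astype("float16") if fp16 else out
```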
@@ -28,15 +28,15 @@ def softmax_mask_fuse_upper_triangle(x):
:return: the result of softmax mask fuse (upper triangle)
"""
if in_dygraph_mode():
-out = core.ops.softmax_mask_fuse_upper_triangle(x)
+out = core.ops.fused_softmax_mask_upper_triangle(x)
return out
-helper = LayerHelper('softmax_mask_fuse_upper_triangle', **locals())
+helper = LayerHelper('fused_softmax_mask_upper_triangle', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
-type='softmax_mask_fuse_upper_triangle',
+type='fused_softmax_mask_upper_triangle',
inputs={'X': [x]},
outputs={'Out': [out]})
return out
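For reference, the public wrapper keeps its original name; only the underlying op type is renamed. A minimal dygraph usage sketch, assuming a CUDA build of Paddle and that the wrapper is exposed as `paddle.incubate.softmax_mask_fuse_upper_triangle`:

```python
import paddle

# Assumes a GPU device; the diff above registers the CUDA kernels
# for float16 and float32.
x = paddle.rand([1, 1, 32, 32], dtype="float32")  # [batch, heads, seq, seq] scores
out = paddle.incubate.softmax_mask_fuse_upper_triangle(x)
print(out.shape)  # [1, 1, 32, 32]
```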