Unverified · Commit 57201d9d · authored by: Wang Xin · committed by: GitHub

add autogen code for clip_by_norm op (#52743)

* add autogen code for clip_by_norm op

* bug fixed
Parent commit: e54e2bc8
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/clip_by_norm_op.h"
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace ops = paddle::operators;

// Wire the op's shape inference to the PHI ClipByNormInferMeta function via an
// infer-shape functor, so the shape logic lives in PHI rather than being
// duplicated in the fluid operator definition.
DECLARE_INFER_SHAPE_FUNCTOR(clip_by_norm,
                            ClipByNormInferShapeFunctor,
                            PD_INFER_META(phi::ClipByNormInferMeta));

// clip_by_norm defines no gradient op; register only the forward op, its
// proto maker, and the infer-shape functor declared above.
REGISTER_OP_WITHOUT_GRADIENT(clip_by_norm,
                             ops::ClipByNormOp,
                             ops::ClipByNormOpMaker,
                             ClipByNormInferShapeFunctor);
......@@ -222,14 +222,6 @@
kernel :
func : class_center_sample
# clip_by_norm: autogenerated (dygraph) op definition. Takes a tensor `x` and a
# scalar `max_norm` attribute, producing a single output `out`. Shape inference
# and the kernel are both dispatched by name into PHI.
- op : clip_by_norm
  args : (Tensor x, float max_norm)
  output : Tensor(out)
  infer_meta :
    func : ClipByNormInferMeta
  kernel :
    func : clip_by_norm
- op : coalesce_tensor
args : (Tensor[] input, DataType dtype, bool copy_data = false, bool set_constant = false, bool persist_output = false, float constant = 0.0, bool use_align = true, int align_size = -1, int size_of_dtype = -1, int64_t[] concated_shapes = {}, int64_t[] concated_ranks = {})
output : Tensor[](output){input.size()}, Tensor(fused_output)
......
......@@ -356,6 +356,12 @@
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
# clip_by_norm compatibility mapping: bridges the autogenerated op's
# lower-case names (x, out) to the legacy fluid op's capitalized
# parameter names (X, Out) so old programs/models keep loading.
- op : clip_by_norm
  inputs :
    x : X
  outputs :
    out : Out
- op : complex
backward : complex_grad
inputs :
......
......@@ -355,6 +355,15 @@
data_type : x
backward : clip_grad
# clip_by_norm: static-graph op definition. Unlike the dygraph entry, the
# kernel line registers two dispatch targets: the dense-tensor kernel and a
# SelectedRows variant (clip_by_norm_sr), selected by the input's tensor kind.
- op : clip_by_norm
  args : (Tensor x, float max_norm)
  output : Tensor(out)
  infer_meta :
    func : ClipByNormInferMeta
  kernel :
    func : clip_by_norm {dense -> dense}
           clip_by_norm_sr {selected_rows -> selected_rows}
- op : complex
args : (Tensor real, Tensor imag)
output : Tensor
......
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {

// Maps the legacy fluid `clip_by_norm` op arguments onto a PHI kernel
// signature. The kernel name is chosen by the runtime kind of input "X":
// a DenseTensor selects the dense kernel, SelectedRows selects the
// `_sr` variant; anything else yields the sentinel "unregistered".
KernelSignature ClipByNormOpArgumentMapping(const ArgumentMappingContext& ctx) {
  if (ctx.IsDenseTensorInput("X")) {
    return KernelSignature("clip_by_norm", {"X"}, {"max_norm"}, {"Out"});
  }
  if (ctx.IsSelectedRowsInput("X")) {
    return KernelSignature("clip_by_norm_sr", {"X"}, {"max_norm"}, {"Out"});
  }
  // No matching tensor kind for "X": report that no kernel is registered.
  return KernelSignature("unregistered", {}, {}, {});
}

}  // namespace phi

PD_REGISTER_ARG_MAPPING_FN(clip_by_norm, phi::ClipByNormOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册