Unverified commit 5b1dd387 authored by zyfncg, committed by GitHub

[code-gen] Generate static graph code for exp op (#47120)

* generate static graph code for exp

* refactor the doc of exp

* fix bug

* fix bug

* update doc of exp

* fix sparse op
Parent 1431265d
...@@ -159,13 +159,6 @@ $$out = \\log \\frac{1}{1 + e^{-x}}$$
)DOC";
UNUSED constexpr char ExpDoc[] = R"DOC(
Exp Operator. Computes exp of x element-wise with a natural number :math:`e` as the base.
$$out = e^x$$
)DOC";
UNUSED constexpr char Expm1Doc[] = R"DOC(
Expm1 Operator. Computes expm1 of x element-wise with a natural number :math:`e` as the base.
...@@ -806,7 +799,6 @@ It is recommended to use the defaults for this activation.
REGISTER_ACTIVATION_OP_MAKER(Sigmoid, SigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Silu, SiluDoc);
REGISTER_ACTIVATION_OP_MAKER(LogSigmoid, LogSigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Exp, ExpDoc);
REGISTER_ACTIVATION_OP_MAKER(Expm1, Expm1Doc);
REGISTER_ACTIVATION_OP_MAKER(Relu, ReluDoc);
REGISTER_ACTIVATION_OP_MAKER(Tanh, TanhDoc);
...@@ -1721,23 +1713,6 @@ REGISTER_OPERATOR(pow_grad,
ops::ActivationGradOpInplaceInferer);
/* ========================================================================== */
/* ========================== exp register ============================ */
REGISTER_OPERATOR(
exp,
ops::ActivationOp,
ops::ExpOpMaker,
ops::ActivationOpInferVarType,
ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
paddle::framework::OpDesc>,
ops::ActivationGradOpMaker<ops::ExpGradFunctor<float>::FwdDeps(),
paddle::imperative::OpBase>,
std::conditional<ops::CanInplaceAct<ops::ExpGradFunctor<float>>(),
ops::ActFwdInplaceInferer,
void>::type);
REGISTER_OPERATOR(exp_grad,
ops::ActivationOpGrad,
ops::ActivationGradOpInplaceInferer);
/* ========================== Log register ==================================*/
REGISTER_OPERATOR(
log,
......
...@@ -273,7 +273,6 @@ USE_PHI_FUNCTOR(Asinh)
USE_PHI_FUNCTOR(Acosh)
USE_PHI_FUNCTOR(Atanh)
USE_PHI_FUNCTOR(Tanh)
USE_PHI_FUNCTOR(Exp)
USE_PHI_DOUBLE_GRAD_FUNCTOR(Tanh)
USE_PHI_TRIPLE_GRAD_FUNCTOR(Tanh)
USE_PHI_FUNCTOR(BRelu)
...@@ -301,7 +300,6 @@ USE_PHI_FUNCTOR(Log1p)
USE_PHI_FUNCTOR(Swish)
USE_PHI_FUNCTOR(HardSwish)
USE_PHI_FUNCTOR(Pow)
USE_PHI_FUNCTOR(Exp)
USE_PHI_FUNCTOR(Expm1)
USE_PHI_FUNCTOR(Mish)
USE_PHI_FUNCTOR(STanh)
......
...@@ -447,6 +447,11 @@ GenerateOpFunctions(int split_count) {
!phi::KernelFactory::Instance().HasCompatiblePhiKernel(op_type)) {
continue;
}
// Skip the sparse op
if (op_type.compare(0, 7, "sparse_") == 0 && op_type != "sparse_momentum" &&
op_type != "sparse_attention") {
continue;
}
op_info_map_need_gen.emplace(pair);
}
......
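The new check above makes the op function generator (GenerateOpFunctions) skip every op type whose name starts with the sparse_ prefix, except sparse_momentum and sparse_attention, which are ordinary dense ops that merely carry the prefix in their names. A small Python mirror of the condition, for reference only; the function name is illustrative and not part of the patch:

def should_skip_sparse_op(op_type: str) -> bool:
    # Same condition as the C++ check above: skip op types starting with
    # "sparse_", except the two dense ops named with that prefix.
    return op_type.startswith("sparse_") and op_type not in (
        "sparse_momentum",
        "sparse_attention",
    )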
...@@ -115,6 +115,17 @@
  kernel :
    func : erfinv_grad
- backward_op : exp_grad
  forward : exp (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : exp_grad
  inplace : (out_grad -> x_grad)
- backward_op : fft_c2c_grad
  forward: fft_c2c(Tensor x, int64_t[] axes, str normalization, bool forward) -> Tensor(out)
  args : (Tensor out_grad, int64_t[] axes, str normalization, bool forward)
......
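Note that the exp_grad entry above takes the forward output `out` rather than the input `x`: since the derivative of the exponential is the exponential itself, the saved forward result can be reused instead of recomputing it, and the elementwise chain rule reduces to a single multiply:

$$x\_grad = out\_grad \cdot \frac{\partial out}{\partial x} = out\_grad \cdot e^{x} = out\_grad \cdot out$$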
...@@ -136,7 +136,6 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict):
key = args_map[key]
if val in args_map:
val = args_map[val]
key, val = val, key
inplace_map[key] = val
forward_api_item['inplace'] = inplace_map
...@@ -208,6 +207,15 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict):
args_map[param] if param in args_map else param
for param in backward_api_item['no_need_buffer']
]
if backward_api_item['inplace']:
    inplace_map = {}
    for key, val in backward_api_item['inplace'].items():
        if key in args_map:
            key = args_map[key]
        if val in args_map:
            val = args_map[val]
        inplace_map[key] = val
    backward_api_item['inplace'] = inplace_map
def process_invoke_op(forward_api_dict, backward_api_dict):
......
...@@ -274,7 +274,7 @@ DECLARE_INFER_SHAPE_FUNCTOR({{api["op_name"]}}, {{api["op_name"] | to_pascal_cas
{% if api["inplace"] is not none %}
{% set inplace_map %}
{% for source, target in api["inplace"].items() %}
{{"{"}}{{source | to_opmaker_name}}, {{target | to_opmaker_name}}{{"}"}}{{", " if not loop.last}} {{"{"}}{{target | to_opmaker_name}}, {{source | to_opmaker_name}}{{"}"}}{{", " if not loop.last}}
{%- endfor %}
{%- endset %}
DECLARE_INPLACE_OP_INFERER({{api["op_name"] | to_pascal_case}}InplaceInferer,
......
...@@ -734,17 +734,6 @@
  output : Tensor(weight_grad)
  invoke : embedding_grad_impl(x, weight, out_grad, padding_idx, sparse, weight_grad)
- backward_op : exp_grad
  forward : exp (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : exp_grad
  inplace : (out_grad -> x_grad)
- backward_op : expand_as_grad
  forward : expand_as (Tensor x, Tensor y, int[] target_shape) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] target_shape)
......
...@@ -856,16 +856,6 @@
  kernel :
    func : equal_all
- op : exp
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : exp
  inplace : (x -> out)
  backward : exp_grad
- op : expand
  args : (Tensor x, IntArray shape)
  output : Tensor
......
...@@ -274,6 +274,12 @@
  extra :
    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : exp
  inputs :
    x : X
  outputs :
    out : Out
- op : expand (expand_v2)
  backward : expand_grad (expand_v2_grad)
  extra :
......
...@@ -33,6 +33,16 @@
    func : cholesky_solve
  backward : cholesky_solve_grad
- op : exp
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : exp
  inplace : (x -> out)
  backward : exp_grad
- op : cross
  args : (Tensor x, Tensor y, int axis = 9)
  output : Tensor
......
...@@ -86,7 +86,6 @@ DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Softplus,
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Relu, "relu", );        // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Tanh, "tanh", );        // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Sigmoid, "sigmoid", );  // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Exp, "exp", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Expm1, "expm1", );            // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Reciprocal, "reciprocal", );  // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Sqrt, "sqrt", );              // NOLINT
...@@ -254,7 +253,6 @@ PD_REGISTER_ARG_MAPPING_FN(acosh_grad, phi::AcoshGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(atanh_grad, phi::AtanhGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(relu_grad, phi::ReluGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(exp_grad, phi::ExpGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(expm1_grad, phi::Expm1GradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(square_grad, phi::SquareGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reciprocal_grad,
......
...@@ -18,8 +18,10 @@ from .layer_function_generator import (
    generate_inplace_fn,
    add_sample_code,
)
from ..fluid.framework import in_dygraph_mode from ..fluid.data_feeder import check_variable_and_dtype
from .. import _C_ops from ..fluid.framework import in_dygraph_mode, _in_legacy_dygraph
from ..framework import LayerHelper
from .. import _C_ops, _legacy_C_ops
__deprecated_func_name__ = {
    'tanh_shrink': 'tanhshrink',
...@@ -37,7 +39,6 @@ __activations_noattr__ = [
]
__unary_func__ = [
'exp',
    'expm1',
    'atan',
    'sqrt',
...@@ -158,22 +159,6 @@ Examples:
""",
)
add_sample_code(
globals()["exp"],
r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.exp(x)
print(out)
# [0.67032005 0.81873075 1.10517092 1.34985881]
""",
)
add_sample_code(
    globals()["expm1"],
    r"""
...@@ -561,6 +546,58 @@ Examples:
""",
)
def exp(x, name=None):
    """
    Computes exp of x element-wise with a natural number `e` as the base.

    .. math::
        out = e^x

    Args:
        x (Tensor): Input of Exp operator, an N-D Tensor, with data type float32, float64 or float16.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor. Output of Exp operator, a Tensor with shape same as input.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
            out = paddle.exp(x)
            print(out)
            # [0.67032005 0.81873075 1.10517092 1.34985881]
    """
    if in_dygraph_mode():
        return _C_ops.exp(x)
    if _in_legacy_dygraph():
        return _legacy_C_ops.exp(x)

    check_variable_and_dtype(
        x,
        'x',
        [
            'int32',
            'int64',
            'float16',
            'float32',
            'float64',
            'complex64',
            'complex128',
        ],
        'exp',
    )
    helper = LayerHelper('exp', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='exp', inputs={"X": x}, outputs={"Out": out})
    return out
__all__ += ['erf']
_erf_ = generate_layer_fn('erf')
......
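As a quick sanity check of the new Python `exp` entry point, here is a minimal usage sketch of its static-graph branch (the two dygraph branches simply forward to _C_ops.exp / _legacy_C_ops.exp); the program and executor setup below is illustrative and not part of the patch:

import numpy as np
import paddle

paddle.enable_static()  # exercise the LayerHelper / append_op path in exp()
main_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog):
    x = paddle.static.data(name='x', shape=[4], dtype='float32')
    out = paddle.exp(x)  # appends an 'exp' op with inputs {"X": x}, outputs {"Out": out}

exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(
    main_prog,
    feed={'x': np.array([-0.4, -0.2, 0.1, 0.3], dtype='float32')},
    fetch_list=[out],
)
print(res)  # roughly [0.67032005 0.81873075 1.10517092 1.34985881]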