Unverified commit c5d99138, authored by zyfncg, committed by GitHub

Generate static graph code for some activation ops by Yaml (part2) (#47440)

* gene static graph code for ceil, expm1 op

* gene static graph code for some activation op

* fix bug

* revert doc of silu and logsigmoid
Parent c923e6c9
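To make "generate static graph code by YAML" concrete, here is a minimal sketch (not part of the commit; it assumes a PaddlePaddle build that includes this change) which runs one of the migrated ops, paddle.ceil, through a static graph whose C++ operator definition is now generated from the YAML entries shown further down in this diff:

import numpy as np
import paddle

# Build a static graph that uses the migrated ceil op; its OpMaker and
# registration are now code-generated from the YAML op definitions.
paddle.enable_static()
x = paddle.static.data(name="x", shape=[4], dtype="float32")
out = paddle.ceil(x)

# Execute the graph; the result should match the docstring example: [-0. -0. 1. 1.]
exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(
    feed={"x": np.array([-0.4, -0.2, 0.1, 0.3], dtype="float32")},
    fetch_list=[out],
)
print(res)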
...@@ -146,26 +146,6 @@ $$out = \frac{1}{1 + e^{-x}}$$
)DOC";
UNUSED constexpr char SiluDoc[] = R"DOC(
Silu Activation Operator
$$out = x * \\frac{1}{1 + e^{-x}}$$
)DOC";
UNUSED constexpr char LogSigmoidDoc[] = R"DOC(
Logsigmoid Activation Operator
$$out = \\log \\frac{1}{1 + e^{-x}}$$
)DOC";
UNUSED constexpr char Expm1Doc[] = R"DOC(
Expm1 Operator. Computes expm1 of x element-wise with a natural number :math:`e` as the base.
$$out = e^x - 1$$
)DOC";
UNUSED constexpr char ReluDoc[] = R"DOC(
Relu Activation Operator.
...@@ -206,43 +186,6 @@ $$out = \\frac{1}{\\sqrt{x}}$$
)DOC";
UNUSED constexpr char CeilDoc[] = R"DOC(
Ceil Operator. Computes ceil of x element-wise.
.. math::
out = \left \lceil x \right \rceil
)DOC";
UNUSED constexpr char FloorDoc[] = R"DOC(
Floor Activation Operator. Computes floor of x element-wise.
$$out = \\lfloor x \\rfloor$$
)DOC";
UNUSED constexpr char RoundDoc[] = R"DOC(
The OP rounds the values in the input to the nearest integer value.
.. code-block:: text
input:
x.shape = [4]
x.data = [1.2, -0.9, 3.4, 0.9]
output:
out.shape = [4]
out.data = [1., -1., 3., 1.]
)DOC";
UNUSED constexpr char ReciprocalDoc[] = R"DOC(
Reciprocal Activation Operator.
$$out = \\frac{1}{x}$$
)DOC";
UNUSED constexpr char LogDoc[] = R"DOC(
Log Activation Operator.
...@@ -252,33 +195,6 @@ Natural logarithm of x.
)DOC";
UNUSED constexpr char Log2Doc[] = R"DOC(
Log2 Activation Operator.
$$out = \log_2x$$
logarithm of x base to 2.
)DOC";
UNUSED constexpr char Log10Doc[] = R"DOC(
Log10 Activation Operator.
$$out = \log_10_x$$
logarithm of x base to 10.
)DOC";
UNUSED constexpr char Log1pDoc[] = R"DOC(
Log Activation Operator.
$out = \ln(x+1)$
Natural logarithm of x.
)DOC";
UNUSED constexpr char SquareDoc[] = R"DOC(
The OP square each elements of the inputs.
...@@ -356,28 +272,6 @@ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
}
};
class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of HardShrink operator");
AddOutput("Out", "Output of HardShrink operator");
AddAttr<float>("threshold",
"The value of threshold for HardShrink. [default: 0.5]")
.SetDefault(0.5f);
AddComment(R"DOC(
:strong:`HardShrink activation operator`
.. math::
out = \begin{cases}
x, \text{if } x > \lambda \\
x, \text{if } x < -\lambda \\
0, \text{otherwise}
\end{cases}
)DOC");
}
};
class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
...@@ -454,39 +348,6 @@ class ELUGradOpMaker : public framework::SingleGradOpMaker<T> {
}
};
class LogitOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of Logit operator");
AddOutput("Out", "Output of Logit operator");
AddAttr<float>("eps",
"(float, default 1e-6f) the epsilon for input clamp bound")
.SetDefault(1e-6f);
AddComment(R"DOC(
Logit Operator.
this function is defined as follow:
$ logit=ln\left ( {\frac {x} {1-x}} \right ) $
)DOC");
}
};
template <typename T>
class LogitGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("logit_grad");
grad_op->SetInput("X", this->Input("X"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
grad_op->SetAttrMap(this->Attrs());
}
};
class CELUOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
...@@ -591,31 +452,6 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
}
};
class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "An N-D Tensor with data type float32, float64. ");
AddOutput("Out", "A Tensor with the same shape as input. ");
AddAttr<float>("slope",
"The slope of the linear approximation of sigmoid. Its "
"value MUST BE positive. Default is 0.2. ")
.SetDefault(0.2f);
AddAttr<float>(
"offset",
"The offset of the linear approximation of sigmoid. Default is 0.5. ")
.SetDefault(0.5f);
AddComment(R"DOC(
HardSigmoid Activation Operator.
A 3-part piecewise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.
$$out = \max(0, \min(1, slope * x + offset))$$
)DOC");
}
};
class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
...@@ -684,22 +520,12 @@ It is recommended to use the defaults for this activation.
};
REGISTER_ACTIVATION_OP_MAKER(Sigmoid, SigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Silu, SiluDoc);
REGISTER_ACTIVATION_OP_MAKER(LogSigmoid, LogSigmoidDoc);
REGISTER_ACTIVATION_OP_MAKER(Expm1, Expm1Doc);
REGISTER_ACTIVATION_OP_MAKER(Relu, ReluDoc);
REGISTER_ACTIVATION_OP_MAKER(Tanh, TanhDoc);
REGISTER_ACTIVATION_OP_MAKER(TanhShrink, TanhShrinkDoc);
REGISTER_ACTIVATION_OP_MAKER(Sqrt, SqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Rsqrt, RsqrtDoc);
REGISTER_ACTIVATION_OP_MAKER(Ceil, CeilDoc);
REGISTER_ACTIVATION_OP_MAKER(Floor, FloorDoc);
REGISTER_ACTIVATION_OP_MAKER(Round, RoundDoc);
REGISTER_ACTIVATION_OP_MAKER(Reciprocal, ReciprocalDoc);
REGISTER_ACTIVATION_OP_MAKER(Log, LogDoc);
REGISTER_ACTIVATION_OP_MAKER(Log2, Log2Doc);
REGISTER_ACTIVATION_OP_MAKER(Log10, Log10Doc);
REGISTER_ACTIVATION_OP_MAKER(Log1p, Log1pDoc);
REGISTER_ACTIVATION_OP_MAKER(Square, SquareDoc);
REGISTER_ACTIVATION_OP_MAKER(Softsign, SoftsignDoc);
...@@ -1093,73 +919,6 @@ DECLARE_INPLACE_OP_INFERER(ActivationDoubleGradOpInplaceInferer,
DECLARE_INPLACE_OP_INFERER(ActivationTripleGradOpInplaceInferer,
{"DDX", "D_DOut"});
class LogitOp : public framework::OperatorWithKernel {
public:
LogitOp(const std::string& type,
const framework::VariableNameMap& inputs,
const framework::VariableNameMap& outputs,
const framework::AttributeMap& attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"),
true,
platform::errors::InvalidArgument(
"Input(%s) of LogitOp should not be null.", "X"));
PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"),
true,
platform::errors::InvalidArgument(
"Output(%s) of LogitOp should not be null.", "Out"));
ctx->ShareDim("X", /*->*/ "Out");
ctx->ShareLoD("X", /*->*/ "Out");
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
framework::LibraryType library{framework::LibraryType::kPlain};
phi::DataLayout layout = phi::DataLayout::kAnyLayout;
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
}
};
class LogitGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")),
true,
platform::errors::InvalidArgument(
"Input(%s) of LogitGradOp should not be null.", "DOut"));
PADDLE_ENFORCE_EQ(ctx->HasInput("X"),
true,
platform::errors::InvalidArgument(
"Input(%s) of LogitGradOp should not be null.", "X"));
PADDLE_ENFORCE_EQ(
ctx->HasOutput(framework::GradVarName("X")),
true,
platform::errors::InvalidArgument(
"Output(%s) of LogitGradOp should not be null.", "DX"));
auto x_grad_name = framework::GradVarName("X");
ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X"));
ctx->ShareLoD("X", /*->*/ x_grad_name);
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
framework::LibraryType library{framework::LibraryType::kPlain};
phi::DataLayout layout = phi::DataLayout::kAnyLayout;
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.GetPlace(), layout, library);
}
};
template <typename T>
class PowGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
...@@ -1273,10 +1032,6 @@ REGISTER_ACTIVATION_OP(thresholded_relu,
ThresholdedReluFunctor,
ThresholdedReluGradFunctor);
REGISTER_ACTIVATION_OP(relu6, Relu6, Relu6Functor, Relu6GradFunctor);
REGISTER_ACTIVATION_OP(hard_shrink,
HardShrink,
HardShrinkFunctor,
HardShrinkGradFunctor);
REGISTER_ACTIVATION_OP(softshrink,
SoftShrink,
SoftShrinkFunctor,
...@@ -1285,42 +1040,21 @@ REGISTER_ACTIVATION_OP(tanh_shrink,
TanhShrink,
TanhShrinkFunctor,
TanhShrinkGradFunctor);
REGISTER_ACTIVATION_OP(silu, Silu, SiluFunctor, SiluGradFunctor);
REGISTER_ACTIVATION_OP(softsign,
Softsign,
SoftsignFunctor,
SoftsignGradFunctor);
REGISTER_ACTIVATION_OP(hard_sigmoid,
HardSigmoid,
HardSigmoidFunctor,
HardSigmoidGradFunctor);
REGISTER_ACTIVATION_OP(logsigmoid,
LogSigmoid,
LogSigmoidFunctor,
LogSigmoidGradFunctor);
REGISTER_ACTIVATION_OP(expm1, Expm1, Expm1Functor, Expm1GradFunctor);
REGISTER_ACTIVATION_OP(softplus,
Softplus,
SoftplusFunctor,
SoftplusGradFunctor);
REGISTER_ACTIVATION_OP(mish, Mish, MishFunctor, MishGradFunctor);
REGISTER_ACTIVATION_OP(stanh, STanh, STanhFunctor, STanhGradFunctor);
REGISTER_ACTIVATION_OP(reciprocal,
Reciprocal,
ReciprocalFunctor,
ReciprocalGradFunctor);
REGISTER_ACTIVATION_OP(log2, Log2, Log2Functor, Log2GradFunctor);
REGISTER_ACTIVATION_OP(log10, Log10, Log10Functor, Log10GradFunctor);
REGISTER_ACTIVATION_OP(log1p, Log1p, Log1pFunctor, Log1pGradFunctor);
REGISTER_ACTIVATION_OP(hard_swish,
HardSwish,
HardSwishFunctor,
HardSwishGradFunctor);
REGISTER_ACTIVATION_OP(swish, Swish, SwishFunctor, SwishGradFunctor);
REGISTER_ACTIVATION_OP(round, Round, RoundFunctor, ZeroGradFunctor);
REGISTER_ACTIVATION_OP(floor, Floor, FloorFunctor, ZeroGradFunctor);
REGISTER_ACTIVATION_OP(ceil, Ceil, CeilFunctor, ZeroGradFunctor);
/* ========================== sigmoid register =============================
*/
...@@ -1459,17 +1193,6 @@ REGISTER_OPERATOR(
/* ========================================================================== */
/* ======================== logit register ============================
*/
REGISTER_OPERATOR(logit,
ops::LogitOp,
ops::LogitOpMaker,
ops::LogitGradOpMaker<paddle::framework::OpDesc>,
ops::LogitGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(logit_grad, ops::LogitGradOp);
/* ========================================================================== */
/* ======================== celu register ============================
*/
REGISTER_OPERATOR(
......
...@@ -106,6 +106,17 @@
func : bmm_grad
data_type : out_grad
- backward_op : ceil_grad
forward : ceil(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : ceil_grad
inplace : (out_grad -> x_grad)
- backward_op : cholesky_grad
forward : cholesky (Tensor x, bool upper) -> Tensor(out)
args : (Tensor out, Tensor out_grad, bool upper)
...@@ -257,6 +268,17 @@
func : exp_grad
inplace : (out_grad -> x_grad)
- backward_op : expm1_grad
forward : expm1 (Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out]
kernel :
func : expm1_grad
inplace : (out_grad -> x_grad)
- backward_op : fft_c2c_grad
forward: fft_c2c(Tensor x, int64_t[] axes, str normalization, bool forward) -> Tensor(out)
args : (Tensor out_grad, int64_t[] axes, str normalization, bool forward)
...@@ -295,6 +317,39 @@
output : Tensor(x_grad)
invoke : flip(out_grad, axis)
- backward_op : floor_grad
forward : floor(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : floor_grad
inplace : (out_grad -> x_grad)
- backward_op : hardshrink_grad
forward : hardshrink (Tensor x, float threshold) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : hard_shrink_grad
inplace : (out_grad -> x_grad)
- backward_op : hardsigmoid_grad
forward : hardsigmoid (Tensor x, float slope, float offset) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float slope, float offset)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out]
kernel :
func : hard_sigmoid_grad
inplace : (out_grad -> x_grad)
- backward_op : lgamma_grad
forward : lgamma(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
...@@ -305,6 +360,60 @@
kernel :
func : lgamma_grad
- backward_op : log10_grad
forward : log10 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log10_grad
inplace : (out_grad -> x_grad)
- backward_op : log1p_grad
forward : log1p (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log1p_grad
inplace : (out_grad -> x_grad)
- backward_op : log2_grad
forward : log2 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log2_grad
inplace : (out_grad -> x_grad)
- backward_op : logit_grad
forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float eps)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : logit_grad
- backward_op : logsigmoid_grad
forward : logsigmoid (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : logsigmoid_grad
inplace : (out_grad -> x_grad)
- backward_op : mv_grad
forward : mv (Tensor x, Tensor vec) -> Tensor(out)
args : (Tensor x, Tensor vec, Tensor out_grad)
...@@ -325,6 +434,28 @@
kernel :
func : poisson_grad
- backward_op : reciprocal_grad
forward : reciprocal (Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out]
kernel :
func : reciprocal_grad
inplace : (out_grad -> x_grad)
- backward_op : round_grad
forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : round_grad
inplace : (out_grad -> x_grad)
- backward_op : send_uv_grad
forward : send_uv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD") -> Tensor(out)
args: (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out_grad, str message_op = "ADD")
...@@ -336,6 +467,17 @@
func : send_uv_grad
data_type : x
- backward_op : silu_grad
forward : silu (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : silu_grad
inplace : (out_grad -> x_grad)
- backward_op : sin_grad
forward : sin (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......
...@@ -217,17 +217,6 @@
invoke : cast (out_grad, x.dtype())
no_need_buffer : x
- backward_op : ceil_grad
forward : ceil(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : ceil_grad
inplace : (out_grad -> x_grad)
- backward_op : celu_double_grad
forward : celu_grad(Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
args : (Tensor x, Tensor grad_out, Tensor grad_x_grad, float alpha)
...@@ -621,17 +610,6 @@
no_need_buffer : x
backward : expand_double_grad
- backward_op : expm1_grad
forward : expm1 (Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out]
kernel :
func : expm1_grad
inplace : (out_grad -> x_grad)
- backward_op : exponential__grad
forward : exponential_ (Tensor x, float lam) -> Tensor(out)
args : (Tensor out_grad)
...@@ -684,17 +662,6 @@
layout: out_grad
inplace : (out_grad -> x_grad)
- backward_op : floor_grad
forward : floor(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : floor_grad
inplace : (out_grad -> x_grad)
- backward_op : fmax_grad
forward : fmax(Tensor x, Tensor y, int axis) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
...@@ -802,28 +769,6 @@
kernel :
func : gumbel_softmax_grad
- backward_op : hardshrink_grad
forward : hardshrink (Tensor x, float threshold) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : hard_shrink_grad
inplace : (out_grad -> x_grad)
- backward_op : hardsigmoid_grad
forward : hardsigmoid (Tensor x, float slope, float offset) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float slope, float offset)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out]
kernel :
func : hard_sigmoid_grad
inplace : (out_grad -> x_grad)
- backward_op : hardswish_grad
forward : hardswish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold, float scale, float offset)
...@@ -1040,39 +985,6 @@
func : linear_interp_grad
data_type : output_grad
- backward_op : log10_grad
forward : log10 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log10_grad
inplace : (out_grad -> x_grad)
- backward_op : log1p_grad
forward : log1p (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log1p_grad
inplace : (out_grad -> x_grad)
- backward_op : log2_grad
forward : log2 (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : log2_grad
inplace : (out_grad -> x_grad)
- backward_op : log_double_grad
forward : log_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
args : (Tensor x, Tensor grad_out, Tensor grad_x_grad)
...@@ -1126,27 +1038,6 @@
kernel :
func : logcumsumexp_grad
- backward_op : logit_grad
forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float eps)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : logit_grad
- backward_op : logsigmoid_grad
forward : logsigmoid (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : logsigmoid_grad
inplace : (out_grad -> x_grad)
- backward_op : logsumexp_grad
forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
...@@ -1625,17 +1516,6 @@
output : Tensor(x_grad)
invoke : real_grad_impl(out_grad, x_grad)
- backward_op : reciprocal_grad
forward : reciprocal (Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [out]
kernel :
func : reciprocal_grad
inplace : (out_grad -> x_grad)
- backward_op : reduce_prod_grad
forward : reduce_prod (Tensor x, IntArray dims, bool keep_dim, bool reduce_all) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, IntArray dims, bool keep_dim, bool reduce_all)
...@@ -1803,17 +1683,6 @@
data_type : x
no_need_buffer : x
- backward_op : round_grad
forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [out_grad]
kernel :
func : round_grad
inplace : (out_grad -> x_grad)
- backward_op : rsqrt_double_grad
forward : rsqrt_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
args : (Tensor out, Tensor grad_x, Tensor grad_x_grad)
...@@ -1964,17 +1833,6 @@
output : Tensor(x_grad)
invoke : scale(out_grad, 0.0, 0.0, true)
- backward_op : silu_grad
forward : silu (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : silu_grad
inplace : (out_grad -> x_grad)
- backward_op : slice_double_grad
forward : slice_grad (Tensor input, Tensor grad_out, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) -> Tensor(grad_input)
args : (Tensor grad_input_grad, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
...@@ -2068,7 +1926,6 @@
args : (Tensor[] out_grad, Scalar axis = -1)
output : Tensor(x_grad)
invoke : concat( out_grad, axis)
# TODO(zhangyunfei) The config of double grad and triple grad will be supported in the future.
- backward_op : sqrt_double_grad
forward : sqrt_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
......
...@@ -357,16 +357,6 @@
data_type : x
backward : cast_grad
- op : ceil
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : ceil
inplace : (x -> out)
backward : ceil_grad
- op : celu
args : (Tensor x, float alpha)
output : Tensor(out)
...@@ -757,16 +747,6 @@
optional : y
backward : expand_as_grad
- op : expm1
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : expm1
backward : expm1_grad
- op : exponential_
args : (Tensor x, float lam)
output : Tensor(out)
...@@ -834,16 +814,6 @@
intermediate : xshape
backward : flatten_grad
- op : floor
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : floor
inplace : (x -> out)
backward : floor_grad
- op : floor_divide
args : (Tensor x, Tensor y)
output : Tensor(out)
...@@ -1046,26 +1016,6 @@
func : gumbel_softmax
backward : gumbel_softmax_grad
- op : hardshrink
args : (Tensor x, float threshold)
output : Tensor
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : hard_shrink
backward : hardshrink_grad
- op : hardsigmoid
args : (Tensor x, float slope, float offset)
output : Tensor
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : hard_sigmoid
backward : hardsigmoid_grad
- op : hardswish
args : (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
output : Tensor
...@@ -1359,33 +1309,6 @@
func : log
backward: log_grad
- op : log10
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log10
backward: log10_grad
- op : log1p
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log1p
backward: log1p_grad
- op : log2
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log2
backward: log2_grad
- op : log_loss
args : (Tensor input, Tensor label, float epsilon)
output : Tensor
...@@ -1445,25 +1368,6 @@
kernel :
func : logical_xor
- op : logit
args : (Tensor x, float eps = 1e-6f)
output : Tensor
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : logit
backward : logit_grad
- op : logsigmoid
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : logsigmoid
backward : logsigmoid_grad
- op : logsumexp
args : (Tensor x, int64_t[] axis, bool keepdim, bool reduce_all)
output : Tensor(out)
...@@ -1989,16 +1893,6 @@
func : real
backward : real_grad
- op : reciprocal
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : reciprocal
inplace : (x -> out)
backward : reciprocal_grad
- op : reduce_prod
args : (Tensor x, IntArray dims, bool keep_dim, bool reduce_all)
output : Tensor
...@@ -2130,16 +2024,6 @@
func : roll
backward : roll_grad
- op : round
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : round
inplace : (x -> out)
backward : round_grad
- op : rsqrt
args : (Tensor x)
output : Tensor(out)
...@@ -2295,15 +2179,6 @@
func : sign
backward : sign_grad
- op : silu
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : silu
backward : silu_grad
- op : slice
args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
output : Tensor
......
...@@ -120,6 +120,10 @@
- op : ceil
backward : ceil_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
...@@ -347,6 +351,10 @@
- op : expm1
backward : expm1_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
...@@ -398,6 +406,10 @@
- op : floor
backward : floor_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
...@@ -457,6 +469,20 @@
extra :
attrs : [bool use_mkldnn = false]
- op : hardshrink (hard_shrink)
backward : hardshrink_grad (hard_shrink_grad)
inputs :
x : X
outputs :
out : Out
- op : hardsigmoid (hard_sigmoid)
backward : hardsigmoid_grad (hard_sigmoid_grad)
inputs :
x : X
outputs :
out : Out
- op : heaviside (elementwise_heaviside)
backward : heaviside_grad (elementwise_heaviside_grad)
extra :
...@@ -496,16 +522,28 @@
- op : log10
backward : log10_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log1p
backward : log1p_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log2
backward : log2_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
...@@ -514,6 +552,18 @@
extra :
attrs : [bool use_mkldnn = false]
- op : logit
inputs :
x : X
outputs :
out : Out
- op : logsigmoid
inputs :
x : X
outputs :
out : Out
- op : logsigmoid
backward : logsigmoid_grad
extra :
...@@ -620,6 +670,10 @@
- op : reciprocal
backward : reciprocal_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
...@@ -688,6 +742,10 @@
- op : round
backward : round_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
...@@ -728,6 +786,10 @@
- op : silu
backward : silu_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
......
...@@ -96,6 +96,16 @@
func : bmm
backward : bmm_grad
- op : ceil
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : ceil
inplace : (x -> out)
backward : ceil_grad
- op : cholesky
args : (Tensor x, bool upper=false)
output : Tensor
...@@ -226,6 +236,16 @@
inplace : (x -> out)
backward : exp_grad
- op : expm1
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : expm1
backward : expm1_grad
- op : fft_c2c
args : (Tensor x, int64_t[] axes, str normalization, bool forward)
output : Tensor
...@@ -262,6 +282,36 @@
func : flip
backward : flip_grad
- op : floor
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : floor
inplace : (x -> out)
backward : floor_grad
- op : hardshrink
args : (Tensor x, float threshold = 0.5)
output : Tensor (out)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : hard_shrink
backward : hardshrink_grad
- op : hardsigmoid
args : (Tensor x, float slope = 0.2, float offset = 0.5)
output : Tensor (out)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : hard_sigmoid
backward : hardsigmoid_grad
- op : lgamma
args : (Tensor x)
output : Tensor(out)
...@@ -271,6 +321,52 @@
func : lgamma
backward : lgamma_grad
- op : log10
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log10
backward: log10_grad
- op : log1p
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log1p
backward: log1p_grad
- op : log2
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : log2
backward: log2_grad
- op : logit
args : (Tensor x, float eps = 1e-6f)
output : Tensor
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : logit
backward : logit_grad
- op : logsigmoid
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : logsigmoid
backward : logsigmoid_grad
- op : mv
args : (Tensor x, Tensor vec)
output : Tensor
...@@ -289,6 +385,26 @@
func : poisson
backward : poisson_grad
- op : reciprocal
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : reciprocal
inplace : (x -> out)
backward : reciprocal_grad
- op : round
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
kernel :
func : round
inplace : (x -> out)
backward : round_grad
- op : send_uv
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD")
output : Tensor(out)
...@@ -299,6 +415,15 @@
data_type : x
backward : send_uv_grad
- op : silu
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : silu
backward : silu_grad
- op : sin
args : (Tensor x)
output : Tensor
......
...@@ -47,16 +47,10 @@ DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(ThresholdedRelu,
"thresholded_relu",
"threshold");
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(SoftShrink, "soft_shrink", "lambda");
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardShrink, "hard_shrink", "threshold");
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Mish, "mish", "threshold");
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(TanhShrink, "tanh_shrink", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Silu, "silu", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Softsign, "softsign", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(LogSigmoid, "logsigmoid", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Log, "log", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Log2, "log2", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Log10, "log10", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Log1p, "log1p", ); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Celu, "celu", "alpha"); // NOLINT
DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(HardSwish,
"hard_swish",
...@@ -75,15 +69,10 @@ DEFINE_ACT_GRAD_DEPX_OP_ARGMAP(Softplus,
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Relu, "relu", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Tanh, "tanh", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Sigmoid, "sigmoid", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Expm1, "expm1", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Reciprocal, "reciprocal", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Sqrt, "sqrt", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Rsqrt, "rsqrt", ); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(Relu6, "relu6", "threshold"); // NOLINT
DEFINE_ACT_GRAD_DEPOUT_OP_ARGMAP(HardSigmoid,
"hard_sigmoid",
"slope" comma "offset"); // NOLINT
KernelSignature SqrtActiOpArgumentMapping(const ArgumentMappingContext& ctx) {
if (ctx.IsDenseTensorInput("X")) {
return KernelSignature("sqrt", {"X"}, {}, {"Out"});
...@@ -100,10 +89,6 @@ KernelSignature SquareActiOpArgumentMapping(const ArgumentMappingContext& ctx) {
}
}
DEFINE_ACT_GRAD_NODEP_OP_ARGMAP(Round, "round", ); // NOLINT
DEFINE_ACT_GRAD_NODEP_OP_ARGMAP(Floor, "floor", ); // NOLINT
DEFINE_ACT_GRAD_NODEP_OP_ARGMAP(Ceil, "ceil", ); // NOLINT
KernelSignature ReluDoubleGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("relu_double_grad", {"Out", "DDX"}, {}, {"DDOut"});
...@@ -151,10 +136,6 @@ KernelSignature EluOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("elu", {"X"}, {"alpha"}, {"Out"});
}
KernelSignature LogitGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("logit_grad", {"X", "Out@GRAD"}, {"eps"}, {"X@GRAD"});
}
KernelSignature EluGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"elu_grad", {"X", "Out", "Out@GRAD"}, {"alpha"}, {"X@GRAD"});
...@@ -233,10 +214,7 @@ PD_REGISTER_BASE_KERNEL_NAME(brelu_grad, hard_tanh_grad);
PD_REGISTER_ARG_MAPPING_FN(relu_grad, phi::ReluGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(expm1_grad, phi::Expm1GradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(square_grad, phi::SquareGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(reciprocal_grad,
phi::ReciprocalGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(sqrt_grad, phi::SqrtGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(sqrt_grad_grad,
phi::SqrtDoubleGradOpArgumentMapping);
...@@ -265,40 +243,26 @@ PD_REGISTER_ARG_MAPPING_FN(thresholded_relu_grad,
PD_REGISTER_ARG_MAPPING_FN(relu6_grad, phi::Relu6GradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(softshrink_grad,
phi::SoftShrinkGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(hard_shrink_grad,
phi::HardShrinkGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(tanh_shrink_grad,
phi::TanhShrinkGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elu, phi::EluOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elu_grad, phi::EluGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elu_grad_grad, phi::EluDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(silu_grad, phi::SiluGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(softsign_grad, phi::SoftsignGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(sigmoid_grad, phi::SigmoidGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(sigmoid_grad_grad,
phi::SigmoidDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(sigmoid_triple_grad,
phi::SigmoidTripleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(logsigmoid_grad,
phi::LogSigmoidGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(hard_sigmoid_grad,
phi::HardSigmoidGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(logit_grad, phi::LogitGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(log_grad, phi::LogGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(log_grad_grad, phi::LogDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(log2_grad, phi::Log2GradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(log10_grad, phi::Log10GradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(log1p_grad, phi::Log1pGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(sqrt, phi::SqrtActiOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(square, phi::SquareActiOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(hard_swish_grad,
phi::HardSwishGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(swish_grad, phi::SwishGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(round_grad, phi::RoundGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(floor_grad, phi::FloorGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(ceil_grad, phi::CeilGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pow_grad, phi::PowGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pow, phi::PowOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(celu_grad, phi::CeluGradOpArgumentMapping);
......
...@@ -39,14 +39,9 @@ __activations_noattr__ = [
]
__unary_func__ = [
'expm1',
'sqrt',
'rsqrt',
'abs',
'ceil',
'floor',
'round',
'reciprocal',
'square',
]
...@@ -119,15 +114,12 @@ add_sample_code(
r"""
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
out = F.silu(x)
print(out)
# [ 0.7310586 1.7615942 2.8577224, 3.9280552 ]
""",
)
...@@ -136,31 +128,12 @@ add_sample_code(
r"""
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = F.log_sigmoid(x)
print(out)
# [-0.91301525 -0.79813887 -0.64439666 -0.55435524]
""",
)
add_sample_code(
globals()["expm1"],
r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.expm1(x)
print(out)
# [-0.32967997, -0.18126924, 0.10517092, 0.34985882]
""",
)
...@@ -245,70 +218,6 @@ Examples:
""",
)
add_sample_code(
globals()["ceil"],
r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.ceil(x)
print(out)
# [-0. -0. 1. 1.]
""",
)
add_sample_code(
globals()["floor"],
r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.floor(x)
print(out)
# [-1. -1. 0. 0.]
""",
)
add_sample_code(
globals()["round"],
r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
out = paddle.round(x)
print(out)
# [-1. -0. 1. 2.]
""",
)
add_sample_code(
globals()["reciprocal"],
r"""
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.reciprocal(x)
print(out)
# [-2.5 -5. 10. 3.33333333]
""",
)
add_sample_code(
globals()["square"],
r"""
...@@ -582,6 +491,44 @@ def atanh(x, name=None):
return out
def ceil(x, name=None):
"""
Ceil Operator. Computes ceil of x element-wise.
.. math::
out = \\left \\lceil x \\right \\rceil
Args:
x (Tensor): Input of Ceil operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. Output of Ceil operator, a Tensor with shape same as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.ceil(x)
print(out)
# [-0. -0. 1. 1.]
"""
if in_dygraph_mode():
return _C_ops.ceil(x)
if _in_legacy_dygraph():
return _legacy_C_ops.ceil(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'ceil')
helper = LayerHelper('ceil', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='ceil', inputs={"X": x}, outputs={"Out": out})
return out
def cos(x, name=None):
"""
Cosine Operator. Computes cosine of x element-wise.
...@@ -627,18 +574,18 @@ def cosh(x, name=None):
Input range `(-inf, inf)`, output range `(1, inf)`.
.. math::
out = \frac{exp(x)+exp(-x)}{2} out = \\frac{exp(x)+exp(-x)}{2}
Args:
x (Tensor): Input of Cosh operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. Output of Cosh operator, a Tensor with shape same as input.
Examples:
.. code-block:: python
import paddle
...@@ -711,6 +658,167 @@ def exp(x, name=None):
return out
def expm1(x, name=None):
"""
Expm1 Operator. Computes expm1 of x element-wise with a natural number :math:`e` as the base.
.. math::
out = e^x - 1
Args:
x (Tensor): Input of Expm1 operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. Output of Expm1 operator, a Tensor with shape same as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.expm1(x)
print(out)
# [-0.32967997, -0.18126924, 0.10517092, 0.34985882]
"""
if in_dygraph_mode():
return _C_ops.expm1(x)
if _in_legacy_dygraph():
return _legacy_C_ops.expm1(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'expm1')
helper = LayerHelper('expm1', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='expm1', inputs={"X": x}, outputs={"Out": out})
return out
def floor(x, name=None):
"""
Floor Activation Operator. Computes floor of x element-wise.
.. math::
out = \\lfloor x \\rfloor
Args:
x (Tensor): Input of Floor operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. Output of Floor operator, a Tensor with shape same as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.floor(x)
print(out)
# [-1. -1. 0. 0.]
"""
if in_dygraph_mode():
return _C_ops.floor(x)
if _in_legacy_dygraph():
return _legacy_C_ops.floor(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'floor')
helper = LayerHelper('floor', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='floor', inputs={"X": x}, outputs={"Out": out})
return out
def reciprocal(x, name=None):
"""
Reciprocal Activation Operator.
.. math::
out = \\frac{1}{x}
Args:
x (Tensor): Input of Reciprocal operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. Output of Reciprocal operator, a Tensor with shape same as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
out = paddle.reciprocal(x)
print(out)
# [-2.5 -5. 10. 3.33333333]
"""
if in_dygraph_mode():
return _C_ops.reciprocal(x)
if _in_legacy_dygraph():
return _legacy_C_ops.reciprocal(x)
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'reciprocal'
)
helper = LayerHelper('reciprocal', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='reciprocal', inputs={"X": x}, outputs={"Out": out})
return out
def round(x, name=None):
"""
Round the values in the input to the nearest integer value.
.. code-block:: text
input:
x.shape = [4]
x.data = [1.2, -0.9, 3.4, 0.9]
output:
out.shape = [4]
out.data = [1., -1., 3., 1.]
Args:
x (Tensor): Input of Round operator, an N-D Tensor, with data type float32, float64 or float16.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. Output of Round operator, a Tensor with shape same as input.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])
out = paddle.round(x)
print(out)
# [-1. -0. 1. 2.]
"""
if in_dygraph_mode():
return _C_ops.round(x)
if _in_legacy_dygraph():
return _legacy_C_ops.round(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'round')
helper = LayerHelper('round', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='round', inputs={"X": x}, outputs={"Out": out})
return out
def sin(x, name=None):
"""
Sine Activation Operator.
......
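As a quick sanity check of the Python wrappers added above, the following is a small sketch (not part of this diff; the expected values are copied from the docstrings in this commit) exercising the new functions in dygraph mode:

import paddle

# Apply the newly wrapped unary ops to a small tensor and compare against
# the expected values documented in this commit.
x = paddle.to_tensor([-0.4, -0.2, 0.1, 0.3])
print(paddle.expm1(x))       # [-0.32967997, -0.18126924, 0.10517092, 0.34985882]
print(paddle.floor(x))       # [-1., -1., 0., 0.]
print(paddle.reciprocal(x))  # [-2.5, -5., 10., 3.33333333]
print(paddle.round(paddle.to_tensor([-0.5, -0.2, 0.6, 1.5])))  # [-1., -0., 1., 2.]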