Unverified commit bff0cbfc, authored by F fengjiayi and committed by GitHub

Merge pull request #7025 from JiayiFeng/rename_output_of_softmax_and_activitions

Rename output of softmax and activations
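For downstream code, the visible effect of this PR is that softmax and every activation operator now expose their single output under the slot name "Out" instead of "Y", while the input slot "X" is unchanged. Below is a minimal sketch of the convention using plain numpy dictionaries in the same style as the OpTest changes further down; the variable names are illustrative only, not framework API.

```python
import numpy as np

# Illustrative only: the op's output slot is keyed "Out" after this PR.
x = np.random.uniform(0.1, 1, [11, 17]).astype("float32")

op_io = {
    "inputs": {"X": x},              # input slot name stays "X"
    "outputs": {"Out": np.exp(x)},   # was {"Y": ...} before the rename
}
```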
@@ -22,8 +22,8 @@ class ActivationOp : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext *ctx) const override {
-    ctx->SetOutputDim("Y", ctx->GetInputDim("X"));
-    ctx->ShareLoD("X", /*->*/ "Y");
+    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
+    ctx->ShareLoD("X", /*->*/ "Out");
  }
};

@@ -32,7 +32,7 @@ class ActivationOpGrad : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext *ctx) const override {
-    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Y"));
+    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out"));
  }
};

@@ -41,11 +41,11 @@ class SigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  SigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Sigmoid operator");
-    AddOutput("Y", "Output of Sigmoid operator");
+    AddOutput("Out", "Output of Sigmoid operator");
    AddComment(R"DOC(
Sigmoid Activation Operator
-$$y = \frac{1}{1 + e^{-x}}$$
+$$out = \frac{1}{1 + e^{-x}}$$
)DOC");
  }

@@ -56,11 +56,11 @@ class LogSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  LogSigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of LogSigmoid operator");
-    AddOutput("Y", "Output of LogSigmoid operator");
+    AddOutput("Out", "Output of LogSigmoid operator");
    AddComment(R"DOC(
Logsigmoid Activation Operator
-$$y = \log \frac{1}{1 + e^{-x}}$$
+$$out = \log \frac{1}{1 + e^{-x}}$$
)DOC");
  }

@@ -71,11 +71,11 @@ class ExpOpMaker : public framework::OpProtoAndCheckerMaker {
  ExpOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Exp operator");
-    AddOutput("Y", "Output of Exp operator");
+    AddOutput("Out", "Output of Exp operator");
    AddComment(R"DOC(
Exp Activation Operator.
-$y = e^x$
+$out = e^x$
)DOC");
  }

@@ -86,11 +86,11 @@ class ReluOpMaker : public framework::OpProtoAndCheckerMaker {
  ReluOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Relu operator");
-    AddOutput("Y", "Output of Relu operator");
+    AddOutput("Out", "Output of Relu operator");
    AddComment(R"DOC(
Relu Activation Operator.
-$y = \max(x, 0)$
+$out = \max(x, 0)$
)DOC");
  }

@@ -101,12 +101,12 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
  LeakyReluOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of LeakyRelu operator");
-    AddOutput("Y", "Output of LeakyRelu operator");
+    AddOutput("Out", "Output of LeakyRelu operator");
    AddAttr<float>("alpha", "The small negative slope").SetDefault(0.02f);
    AddComment(R"DOC(
LeakyRelu Activation Operator.
-$y = \max(x, \alpha * x)$
+$out = \max(x, \alpha * x)$
)DOC");
  }

@@ -117,13 +117,13 @@ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
  SoftShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Softshrink operator");
-    AddOutput("Y", "Output of Softshrink operator");
+    AddOutput("Out", "Output of Softshrink operator");
    AddAttr<float>("lambda", "non-negative offset").SetDefault(0.5f);
    AddComment(R"DOC(
Softshrink Activation Operator.
$$
-y = \begin{cases}
+out = \begin{cases}
x - \lambda, \text{if } x > \lambda \\
x + \lambda, \text{if } x < -\lambda \\
0, \text{otherwise}

@@ -139,11 +139,11 @@ class TanhOpMaker : public framework::OpProtoAndCheckerMaker {
  TanhOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Tanh operator");
-    AddOutput("Y", "Output of Tanh operator");
+    AddOutput("Out", "Output of Tanh operator");
    AddComment(R"DOC(
Tanh Activation Operator.
-$$y = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
+$$out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
)DOC");
  }

@@ -154,11 +154,11 @@ class TanhShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
  TanhShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of TanhShrink operator");
-    AddOutput("Y", "Output of TanhShrink operator");
+    AddOutput("Out", "Output of TanhShrink operator");
    AddComment(R"DOC(
TanhShrink Activation Operator.
-$$y = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
+$$out = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$
)DOC");
  }

@@ -169,14 +169,14 @@ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker {
  HardShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of HardShrink operator");
-    AddOutput("Y", "Output of HardShrink operator");
+    AddOutput("Out", "Output of HardShrink operator");
    AddAttr<float>("threshold", "The value of threshold for HardShrink")
        .SetDefault(0.5f);
    AddComment(R"DOC(
HardShrink Activation Operator.
$$
-y = \begin{cases}
+out = \begin{cases}
x, \text{if } x > \lambda \\
x, \text{if } x < -\lambda \\
0, \text{otherwise}

@@ -192,11 +192,11 @@ class SqrtOpMaker : public framework::OpProtoAndCheckerMaker {
  SqrtOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Sqrt operator");
-    AddOutput("Y", "Output of Sqrt operator");
+    AddOutput("Out", "Output of Sqrt operator");
    AddComment(R"DOC(
Sqrt Activation Operator.
-$y = \sqrt{x}$
+$out = \sqrt{x}$
)DOC");
  }

@@ -207,11 +207,11 @@ class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
  AbsOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Abs operator");
-    AddOutput("Y", "Output of Abs operator");
+    AddOutput("Out", "Output of Abs operator");
    AddComment(R"DOC(
Abs Activation Operator.
-$y = |x|$
+$out = |x|$
)DOC");
  }

@@ -222,11 +222,11 @@ class CeilOpMaker : public framework::OpProtoAndCheckerMaker {
  CeilOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Ceil operator");
-    AddOutput("Y", "Output of Ceil operator");
+    AddOutput("Out", "Output of Ceil operator");
    AddComment(R"DOC(
Ceil Activation Operator.
-$y = ceil(x)$
+$out = ceil(x)$
)DOC");
  }

@@ -237,11 +237,11 @@ class FloorOpMaker : public framework::OpProtoAndCheckerMaker {
  FloorOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Floor operator");
-    AddOutput("Y", "Output of Floor operator");
+    AddOutput("Out", "Output of Floor operator");
    AddComment(R"DOC(
Floor Activation Operator.
-$y = floor(x)$
+$out = floor(x)$
)DOC");
  }

@@ -252,11 +252,11 @@ class RoundOpMaker : public framework::OpProtoAndCheckerMaker {
  RoundOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Round operator");
-    AddOutput("Y", "Output of Round operator");
+    AddOutput("Out", "Output of Round operator");
    AddComment(R"DOC(
Round Activation Operator.
-$y = [x]$
+$out = [x]$
)DOC");
  }

@@ -267,11 +267,11 @@ class ReciprocalOpMaker : public framework::OpProtoAndCheckerMaker {
  ReciprocalOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Reciprocal operator");
-    AddOutput("Y", "Output of Reciprocal operator");
+    AddOutput("Out", "Output of Reciprocal operator");
    AddComment(R"DOC(
Reciprocal Activation Operator.
-$$y = \frac{1}{x}$$
+$$out = \frac{1}{x}$$
)DOC");
  }

@@ -282,11 +282,11 @@ class LogOpMaker : public framework::OpProtoAndCheckerMaker {
  LogOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Log operator");
-    AddOutput("Y", "Output of Log operator");
+    AddOutput("Out", "Output of Log operator");
    AddComment(R"DOC(
Log Activation Operator.
-$y = \ln(x)$
+$out = \ln(x)$
Natural logarithm of x.

@@ -299,11 +299,11 @@ class SquareOpMaker : public framework::OpProtoAndCheckerMaker {
  SquareOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Square operator");
-    AddOutput("Y", "Output of Square operator");
+    AddOutput("Out", "Output of Square operator");
    AddComment(R"DOC(
Square Activation Operator.
-$y = x^2$
+$out = x^2$
)DOC");
  }

@@ -314,11 +314,11 @@ class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
  SoftplusOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Softplus operator");
-    AddOutput("Y", "Output of Softplus operator");
+    AddOutput("Out", "Output of Softplus operator");
    AddComment(R"DOC(
Softplus Activation Operator.
-$y = \ln(1 + e^{x})$
+$out = \ln(1 + e^{x})$
)DOC");
  }

@@ -329,11 +329,11 @@ class SoftsignOpMaker : public framework::OpProtoAndCheckerMaker {
  SoftsignOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Softsign operator");
-    AddOutput("Y", "Output of Softsign operator");
+    AddOutput("Out", "Output of Softsign operator");
    AddComment(R"DOC(
Softsign Activation Operator.
-$$y = \frac{x}{1 + |x|}$$
+$$out = \frac{x}{1 + |x|}$$
)DOC");
  }

@@ -344,7 +344,7 @@ class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
  BReluOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of BRelu operator");
-    AddOutput("Y", "Output of BRelu operator");
+    AddOutput("Out", "Output of BRelu operator");
    AddAttr<float>("t_min", "The min marginal value of BRelu")
        .SetDefault(static_cast<float>(0));
    AddAttr<float>("t_max", "The max marginal value of BRelu")

@@ -352,7 +352,7 @@ class BReluOpMaker : public framework::OpProtoAndCheckerMaker {
    AddComment(R"DOC(
BRelu Activation Operator.
-$y = \max(\min(x, t_{min}), t_{max})$
+$out = \max(\min(x, t_{min}), t_{max})$
)DOC");
  }

@@ -363,13 +363,13 @@ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker {
  SoftReluOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of SoftRelu operator");
-    AddOutput("Y", "Output of SoftRelu operator");
+    AddOutput("Out", "Output of SoftRelu operator");
    AddAttr<float>("threshold", "The threshold value of SoftRelu")
        .SetDefault(40.0f);
    AddComment(R"DOC(
SoftRelu Activation Operator.
-$y = \ln(1 + \exp(\max(\min(x, threshold), threshold))$
+$out = \ln(1 + \exp(\max(\min(x, threshold), threshold))$
)DOC");
  }

@@ -380,7 +380,7 @@ class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
  ELUOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of ELU operator");
-    AddOutput("Y", "Output of ELU operator");
+    AddOutput("Out", "Output of ELU operator");
    AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
    AddComment(R"DOC(
ELU Activation Operator.

@@ -388,7 +388,7 @@ ELU Activation Operator.
Applies the following element-wise computation on the input according to
https://arxiv.org/abs/1511.07289.
-$y = \max(0, x) + \min(0, \alpha * (e^x - 1))$
+$out = \max(0, x) + \min(0, \alpha * (e^x - 1))$
)DOC");
  }

@@ -399,13 +399,13 @@ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
  Relu6OpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Relu6 operator");
-    AddOutput("Y", "Output of Relu6 operator");
+    AddOutput("Out", "Output of Relu6 operator");
    AddAttr<float>("threshold", "The threshold value of Relu6")
        .SetDefault(6.0f);
    AddComment(R"DOC(
Relu6 Activation Operator.
-$y = \min(\max(0, x), 6)$
+$out = \min(\max(0, x), 6)$
)DOC");
  }

@@ -416,12 +416,12 @@ class PowOpMaker : public framework::OpProtoAndCheckerMaker {
  PowOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Pow operator");
-    AddOutput("Y", "Output of Pow operator");
+    AddOutput("Out", "Output of Pow operator");
    AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
    AddComment(R"DOC(
Pow Activation Operator.
-$y = x^{factor}$
+$out = x^{factor}$
)DOC");
  }

@@ -432,7 +432,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
  STanhOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of STanh operator");
-    AddOutput("Y", "Output of STanh operator");
+    AddOutput("Out", "Output of STanh operator");
    AddAttr<float>("scale_a", "The scale parameter of a for the input")
        .SetDefault(2.0f / 3.0f);
    AddAttr<float>("scale_b", "The scale parameter of b for the input")

@@ -440,7 +440,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
    AddComment(R"DOC(
STanh Activation Operator.
-$$y = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$
+$$out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$
)DOC");
  }

@@ -451,14 +451,14 @@ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker {
  ThresholdedReluOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of ThresholdedRelu operator");
-    AddOutput("Y", "Output of ThresholdedRelu operator");
+    AddOutput("Out", "Output of ThresholdedRelu operator");
    AddAttr<float>("threshold", "The threshold location of activation")
        .SetDefault(1.0f);
    AddComment(R"DOC(
ThresholdedRelu Activation Operator.
$$
-y = \begin{cases}
+out = \begin{cases}
x, \text{if } x > threshold \\
0, \text{otherwise}
\end{cases}

@@ -473,7 +473,7 @@ class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
  HardSigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of HardSigmoid operator");
-    AddOutput("Y", "Output of HardSigmoid operator");
+    AddOutput("Out", "Output of HardSigmoid operator");
    AddAttr<float>("slope", "Slope for linear approximation of sigmoid")
        .SetDefault(0.2f);
    AddAttr<float>("offset", "Offset for linear approximation of sigmoid")

@@ -484,7 +484,7 @@ HardSigmoid Activation Operator.
Segment-wise linear approximation of sigmoid(https://arxiv.org/abs/1603.00391),
which is much faster than sigmoid.
-$y = \max(0, \min(1, slope * x + shift))$
+$out = \max(0, \min(1, slope * x + shift))$
The slope should be positive. The offset can be either positive or negative.
The default slope and shift are set according to the above reference.

@@ -499,12 +499,12 @@ class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
  SwishOpMaker(OpProto *proto, OpAttrChecker *op_checker)
      : framework::OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "Input of Swish operator");
-    AddOutput("Y", "Output of Swish operator");
+    AddOutput("Out", "Output of Swish operator");
    AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
    AddComment(R"DOC(
Swish Activation Operator.
-$$y = \frac{x}{1 + e^{- \beta x}}$$
+$$out = \frac{x}{1 + e^{- \beta x}}$$
)DOC");
  }
...
This diff is collapsed.
@@ -24,13 +24,13 @@ class SoftmaxOp : public framework::OperatorWithKernel {
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"),
                   "Input(X) of SoftmaxOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Y"),
-                   "Output(Y) of SoftmaxOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of SoftmaxOp should not be null.");
    auto x_dims = ctx->GetInputDim("X");
    PADDLE_ENFORCE(x_dims.size() == 2UL,
                   "The input of softmax op must be a matrix.");
-    ctx->SetOutputDim("Y", x_dims);
+    ctx->SetOutputDim("Out", x_dims);
  }
};

@@ -41,7 +41,7 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
    AddInput("X",
             "The input tensor of softmax. "
             "2-D with shape [batch_size, input_feature_dimensions].");
-    AddOutput("Y", "The normalized values with the same shape as X.");
+    AddOutput("Out", "The normalized values with the same shape as X.");
    AddComment(R"DOC(
Softmax Operator.

@@ -59,7 +59,7 @@ exponential values of all the other dimensions is the output of the softmax
operator.
For each row $i$ and each column $j$ in Input(X), we have:
-$$Y[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$
+$$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])}$$
)DOC");
  }

@@ -70,12 +70,12 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should be not null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Y")),
-                   "Input(Y@GRAD) should be not null.");
-    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Y"),
-                      ctx->GetInputDim(framework::GradVarName("Y")),
-                      "Input(Y) and its gradients should have a same shape.");
+    PADDLE_ENFORCE(ctx->HasInput("Out"), "Input(Out) should be not null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) should be not null.");
+    PADDLE_ENFORCE_EQ(ctx->GetInputDim("Out"),
+                      ctx->GetInputDim(framework::GradVarName("Out")),
+                      "Input(Out) and its gradients should have a same shape.");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
...
@@ -26,13 +26,13 @@ class SoftmaxKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
    auto* X = context.Input<Tensor>("X");
-    auto* Y = context.Output<Tensor>("Y");
+    auto* Out = context.Output<Tensor>("Out");
    // allocate memory on device.
-    Y->mutable_data<T>(context.GetPlace());
+    Out->mutable_data<T>(context.GetPlace());
    math::SoftmaxFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), X, Y);
+        context.template device_context<DeviceContext>(), X, Out);
  }
};

@@ -40,15 +40,15 @@ template <typename DeviceContext, typename T>
class SoftmaxGradKernel : public framework::OpKernel<T> {
 public:
  void Compute(const framework::ExecutionContext& context) const override {
-    auto* Y = context.Input<Tensor>("Y");
-    auto* dY = context.Input<Tensor>(framework::GradVarName("Y"));
+    auto* Out = context.Input<Tensor>("Out");
+    auto* dOut = context.Input<Tensor>(framework::GradVarName("Out"));
    auto* dX = context.Output<Tensor>(framework::GradVarName("X"));
    // allocate memory on device.
    dX->mutable_data<T>(context.GetPlace());
    math::SoftmaxGradFunctor<DeviceContext, T>()(
-        context.template device_context<DeviceContext>(), Y, dY, dX);
+        context.template device_context<DeviceContext>(), Out, dOut, dX);
  }
};
...
@@ -184,7 +184,7 @@ class LayerHelper(object):
        self.append_op(
            type=act_type,
            inputs={"X": [input_var]},
-            outputs={"Y": [tmp]},
+            outputs={"Out": [tmp]},
            attrs=act)
        return tmp
...
@@ -386,7 +386,8 @@ def square_error_cost(input, label, **kwargs):
    square_out = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
-        type='square', inputs={'X': [minus_out]}, outputs={'Y': [square_out]})
+        type='square', inputs={'X': [minus_out]},
+        outputs={'Out': [square_out]})
    return square_out

@@ -604,7 +605,7 @@ def sequence_pool(input, pool_type, **kwargs):
    sqrt : out.data = [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2),
           6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2)
    max : out.data = [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1)
    Args:
        input(variable): The input variable which is a LoDTensor.
        pool_type (string): The pooling type of sequence_pool.

@@ -616,7 +617,7 @@ def sequence_pool(input, pool_type, **kwargs):
    Examples:
        .. code-block:: python
            x = fluid.layers.data(name='x', shape=[7, 1],
                                  dtype='float32', lod_level=1)
            avg_x = fluid.layers.sequence_pool(input=x, pool_type='average')

@@ -654,7 +655,7 @@ def sequence_first_step(input, **kwargs):
    out.dim = [3, 1]
    with condition len(x.lod[-1]) - 1 == out.dims[0]
    out.data = [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1)
    Args:
        input(variable): The input variable which is a LoDTensor.

@@ -664,7 +665,7 @@ def sequence_first_step(input, **kwargs):
    Examples:
        .. code-block:: python
            x = fluid.layers.data(name='x', shape=[7, 1],
                                  dtype='float32', lod_level=1)
            x_first_step = fluid.layers.sequence_first_step(input=x)

@@ -687,7 +688,7 @@ def sequence_last_step(input, **kwargs):
    out.dim = [3, 1]
    with condition len(x.lod[-1]) - 1 == out.dims[0]
    out.data = [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1)
    Args:
        input(variable): The input variable which is a LoDTensor.

@@ -697,7 +698,7 @@ def sequence_last_step(input, **kwargs):
    Examples:
        .. code-block:: python
            x = fluid.layers.data(name='x', shape=[7, 1],
                                  dtype='float32', lod_level=1)
            x_last_step = fluid.layers.sequence_last_step(input=x)

@@ -1132,7 +1133,7 @@ def reduce_sum(input, dim=None, keep_dim=False):
    Returns:
        Variable: The reduced Tensor variable.
    Examples:
        .. code-block:: python

@@ -1176,7 +1177,7 @@ def reduce_mean(input, dim=None, keep_dim=False):
    Returns:
        Variable: The reduced Tensor variable.
    Examples:
        .. code-block:: python
...
@@ -10,13 +10,13 @@ class TestExp(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': np.exp(self.inputs['X'])}
+        self.outputs = {'Out': np.exp(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSigmoid(OpTest):

@@ -25,13 +25,13 @@ class TestSigmoid(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': 1 / (1 + np.exp(-self.inputs['X']))}
+        self.outputs = {'Out': 1 / (1 + np.exp(-self.inputs['X']))}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestLogSigmoid(OpTest):

@@ -40,13 +40,13 @@ class TestLogSigmoid(OpTest):
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': np.log(1 / (1 + np.exp(-self.inputs['X'])))}
+        self.outputs = {'Out': np.log(1 / (1 + np.exp(-self.inputs['X'])))}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestTanh(OpTest):

@@ -55,13 +55,13 @@ class TestTanh(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': np.tanh(self.inputs['X'])}
+        self.outputs = {'Out': np.tanh(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestTanhShrink(OpTest):

@@ -70,13 +70,13 @@ class TestTanhShrink(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [10, 17]).astype("float32")
        }
-        self.outputs = {'Y': self.inputs['X'] - np.tanh(self.inputs['X'])}
+        self.outputs = {'Out': self.inputs['X'] - np.tanh(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
class TestHardShrink(OpTest):

@@ -90,13 +90,13 @@ class TestHardShrink(OpTest):
        t = np.copy(x)
        t[(t >= -threshold) & (t <= threshold)] = 0
-        self.outputs = {'Y': t}
+        self.outputs = {'Out': t}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.005)
+        self.check_grad(['X'], 'Out', max_relative_error=0.005)
class TestSoftShrink(OpTest):

@@ -110,13 +110,13 @@ class TestSoftShrink(OpTest):
        y = np.copy(self.inputs['X'])
        y = (y < -lambda_val) * (y + lambda_val) + (y > lambda_val) * (
            y - lambda_val)
-        self.outputs = {'Y': y}
+        self.outputs = {'Out': y}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSqrt(OpTest):

@@ -125,13 +125,13 @@ class TestSqrt(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': np.sqrt(self.inputs['X'])}
+        self.outputs = {'Out': np.sqrt(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestAbs(OpTest):

@@ -144,13 +144,13 @@ class TestAbs(OpTest):
        # we should avoid this
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
-        self.outputs = {'Y': np.abs(self.inputs['X'])}
+        self.outputs = {'Out': np.abs(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestCeil(OpTest):

@@ -158,13 +158,13 @@ class TestCeil(OpTest):
        self.op_type = "ceil"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
-        self.outputs = {'Y': np.ceil(self.inputs['X'])}
+        self.outputs = {'Out': np.ceil(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestFloor(OpTest):

@@ -173,13 +173,13 @@ class TestFloor(OpTest):
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
        # numpy floor need +1
-        self.outputs = {'Y': np.floor(self.inputs['X']) + 1.0}
+        self.outputs = {'Out': np.floor(self.inputs['X']) + 1.0}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestRound(OpTest):

@@ -187,13 +187,13 @@ class TestRound(OpTest):
        self.op_type = "round"
        x = np.random.uniform(-1, 1, [4, 4]).astype("float32")
        self.inputs = {'X': x}
-        self.outputs = {'Y': np.round(self.inputs['X'])}
+        self.outputs = {'Out': np.round(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestRelu(OpTest):

@@ -203,13 +203,13 @@ class TestRelu(OpTest):
        # The same reason with TestAbs
        x[np.abs(x) < 0.005] = 0.02
        self.inputs = {'X': x}
-        self.outputs = {'Y': np.maximum(self.inputs['X'], 0)}
+        self.outputs = {'Out': np.maximum(self.inputs['X'], 0)}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestBRelu(OpTest):

@@ -227,13 +227,13 @@ class TestBRelu(OpTest):
        t = np.copy(x)
        t[t < t_min] = t_min
        t[t > t_max] = t_max
-        self.outputs = {'Y': t}
+        self.outputs = {'Out': t}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestRelu6(OpTest):

@@ -248,14 +248,14 @@ class TestRelu6(OpTest):
        self.inputs = {'X': x}
        self.attrs = {'threshold': threshold}
        self.outputs = {
-            'Y': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
+            'Out': np.minimum(np.maximum(self.inputs['X'], 0), threshold)
        }
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestSoftRelu(OpTest):

@@ -271,13 +271,13 @@ class TestSoftRelu(OpTest):
        t = np.copy(x)
        t[t < -threshold] = -threshold
        t[t > threshold] = threshold
-        self.outputs = {'Y': np.log((np.exp(t) + 1))}
+        self.outputs = {'Out': np.log((np.exp(t) + 1))}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestELU(OpTest):

@@ -290,27 +290,27 @@ class TestELU(OpTest):
        self.inputs = {'X': x}
        self.attrs = {'alpha': alpha}
        self.outputs = {
-            'Y': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
+            'Out': np.maximum(0, x) + np.minimum(0, alpha * (np.exp(x) - 1))
        }
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestReciprocal(OpTest):
    def setUp(self):
        self.op_type = "reciprocal"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
-        self.outputs = {'Y': np.reciprocal(self.inputs['X'])}
+        self.outputs = {'Out': np.reciprocal(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.01)
+        self.check_grad(['X'], 'Out', max_relative_error=0.01)
class TestLog(OpTest):

@@ -319,13 +319,13 @@ class TestLog(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': np.log(self.inputs['X'])}
+        self.outputs = {'Out': np.log(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSquare(OpTest):

@@ -334,13 +334,13 @@ class TestSquare(OpTest):
        self.inputs = {
            'X': np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        }
-        self.outputs = {'Y': np.square(self.inputs['X'])}
+        self.outputs = {'Out': np.square(self.inputs['X'])}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestPow(OpTest):

@@ -348,13 +348,13 @@ class TestPow(OpTest):
        self.op_type = "pow"
        self.inputs = {'X': np.random.uniform(1, 2, [11, 17]).astype("float32")}
        self.attrs = {'factor': 3.0}
-        self.outputs = {'Y': np.power(self.inputs['X'], 3)}
+        self.outputs = {'Out': np.power(self.inputs['X'], 3)}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.02)
+        self.check_grad(['X'], 'Out', max_relative_error=0.02)
class TestSTanh(OpTest):

@@ -366,13 +366,13 @@ class TestSTanh(OpTest):
        scale_a = 2.0 / 3.0
        scale_b = 1.7159
        self.attrs = {'scale_a': scale_a, 'scale_b': scale_b}
-        self.outputs = {'Y': scale_b * np.tanh(self.inputs['X'] * scale_a)}
+        self.outputs = {'Out': scale_b * np.tanh(self.inputs['X'] * scale_a)}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSoftplus(OpTest):

@@ -381,13 +381,13 @@ class TestSoftplus(OpTest):
        self.inputs = {
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float64")
        }
-        self.outputs = {'Y': np.log(1 + np.exp(self.inputs['X']))}
+        self.outputs = {'Out': np.log(1 + np.exp(self.inputs['X']))}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestSoftsign(OpTest):

@@ -397,14 +397,14 @@ class TestSoftsign(OpTest):
            'X': np.random.uniform(-1, 1, [11, 17]).astype("float32")
        }
        self.outputs = {
-            'Y': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
+            'Out': np.divide(self.inputs['X'], 1 + np.abs(self.inputs['X']))
        }
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.007)
+        self.check_grad(['X'], 'Out', max_relative_error=0.007)
class TestThresholdedRelu(OpTest):

@@ -419,13 +419,13 @@ class TestThresholdedRelu(OpTest):
        self.inputs = {'X': X}
        self.attrs = {'threshold': threshold}
-        self.outputs = {'Y': (X > threshold) * X}
+        self.outputs = {'Out': (X > threshold) * X}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=self.relative_error)
+        self.check_grad(['X'], 'Out', max_relative_error=self.relative_error)
class TestHardSigmoid(OpTest):

@@ -447,13 +447,13 @@ class TestHardSigmoid(OpTest):
            upper_threshold - 0.2
        temp = X * slope + offset
-        self.outputs = {'Y': np.maximum(0.0, np.minimum(1.0, temp))}
+        self.outputs = {'Out': np.maximum(0.0, np.minimum(1.0, temp))}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.002)
+        self.check_grad(['X'], 'Out', max_relative_error=0.002)
class TestSwish(OpTest):

@@ -462,13 +462,13 @@ class TestSwish(OpTest):
        X = np.random.uniform(0.1, 1, [11, 17]).astype("float32")
        self.inputs = {'X': X}
        self.attrs = {'beta': 2.3}
-        self.outputs = {'Y': X * expit(self.attrs['beta'] * X)}
+        self.outputs = {'Out': X * expit(self.attrs['beta'] * X)}
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008)
if __name__ == "__main__":
...
@@ -7,7 +7,7 @@ def fc(X, W, Y):
    ret_v = core.Net.create()
    ret_v.append_op(Operator("mul", X="X", Y="W", Out="pre_activation"))
-    ret_v.append_op(Operator("sigmoid", X="pre_activation", Y=Y))
+    ret_v.append_op(Operator("sigmoid", X="pre_activation", Out=Y))
    ret_v.complete_add_op(True)
    return ret_v

@@ -30,7 +30,7 @@ Op(plain_net), inputs:{all[W, X, Y]}, outputs:{all[Out, fc.out, pre_activation]}
    Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
    Op(plain_net), inputs:{all[W, X]}, outputs:{all[fc.out, pre_activation]}.
    Op(mul), inputs:{X[X], Y[W]}, outputs:{Out[pre_activation]}.
-    Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Y[fc.out]}.
+    Op(sigmoid), inputs:{X[pre_activation]}, outputs:{Out[fc.out]}.
'''
        self.assertEqual(expected, "\n" + str(net))
...
@@ -17,14 +17,14 @@ class TestSoftmaxOp(OpTest):
            'X': np.random.uniform(0.1, 1, [10, 10]).astype("float32")
        }
        self.outputs = {
-            'Y': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
+            'Out': np.apply_along_axis(stable_softmax, 1, self.inputs['X'])
        }
    def test_check_output(self):
        self.check_output()
    def test_check_grad(self):
-        self.check_grad(['X'], 'Y')
+        self.check_grad(['X'], 'Out')
if __name__ == "__main__":
...
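As a reference for the softmax change, here is a small numpy sketch of the row-wise computation that the renamed Out output holds. The stable_softmax function below is a hand-written stand-in for the helper referenced in the test above, written under the assumption that it computes a numerically stable softmax over each row.

```python
import numpy as np

def stable_softmax(x):
    # Numerically stable softmax over one row:
    # Out[i, j] = exp(X[i, j]) / sum_j exp(X[i, j])
    shifted = x - np.max(x)
    exps = np.exp(shifted)
    return exps / np.sum(exps)

x = np.random.uniform(0.1, 1, [10, 10]).astype("float32")
out = np.apply_along_axis(stable_softmax, 1, x)
# Each row of the "Out" tensor sums to 1.
assert np.allclose(out.sum(axis=1), 1.0, atol=1e-5)
```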