Unverified commit 8a122ecc, authored by zyfncg, committed by GitHub

move apis from legacy_api.yaml to api.yaml (#43832)

* move apis from legacy_api.yaml to api.yaml

* remove comment

* revert argsort

* fix merge conflict
Parent 7f22ef54
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
class Atan2Op : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
class Atan2OpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X1", "(Tensor), The input tensor of atan2 op.");
AddInput("X2", "(Tensor), The input tensor of atan2 op.");
AddOutput("Out", "(Tensor), The output tensor of atan2 op.");
AddComment(R"DOC(
Atan2 Operator.
This operator is used to perform elementwise atan2 for input $X1$, $X2$.
$$out = atan2(x1, x2)$$
)DOC");
}
};
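The elementwise contract can be checked against the C standard library; a minimal reference sketch, independent of the operator framework (the helper name is illustrative, not a Paddle API):

#include <cmath>
#include <vector>

// Reference behaviour of atan2: out[i] is the angle (in radians) of the
// point (x2[i], x1[i]), i.e. std::atan2 applied elementwise.
std::vector<double> Atan2Reference(const std::vector<double>& x1,
                                   const std::vector<double>& x2) {
  std::vector<double> out(x1.size());
  for (size_t i = 0; i < x1.size(); ++i) {
    out[i] = std::atan2(x1[i], x2[i]);
  }
  return out;
}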
class Atan2GradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X1"), "Input", "X1", "Atan2Grad");
OP_INOUT_CHECK(ctx->HasInput("X2"), "Input", "X2", "Atan2Grad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
"Input",
"Out@Grad",
"Atan2Grad");
auto x1_grad_name = framework::GradVarName("X1");
auto x2_grad_name = framework::GradVarName("X2");
auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
if (ctx->HasOutput(x1_grad_name)) {
ctx->SetOutputDim(framework::GradVarName("X1"), dout_dims);
}
if (ctx->HasOutput(x2_grad_name)) {
ctx->SetOutputDim(framework::GradVarName("X2"), dout_dims);
}
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto dtype = OperatorWithKernel::IndicateVarDataType(ctx, "X1");
return framework::OpKernelType(dtype, ctx.GetPlace());
}
};
template <typename T>
class Atan2GradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("atan2_grad");
retv->SetInput("X1", this->Input("X1"));
retv->SetInput("X2", this->Input("X2"));
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetAttrMap(this->Attrs());
retv->SetOutput(framework::GradVarName("X1"), this->InputGrad("X1"));
retv->SetOutput(framework::GradVarName("X2"), this->InputGrad("X2"));
}
};
class Atan2OpVarTypeInference : public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext* ctx) const override {
auto type = ctx->GetInputDataType("X1");
if (ctx->GetInputDataType("X1") == framework::proto::VarType::INT32 ||
ctx->GetInputDataType("X1") == framework::proto::VarType::INT64 ||
ctx->GetInputDataType("X2") == framework::proto::VarType::INT32 ||
ctx->GetInputDataType("X2") == framework::proto::VarType::INT64) {
type = framework::proto::VarType::FP64;
}
ctx->SetOutputDataType("Out", type);
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(atan2,
Atan2InferShapeFunctor,
PD_INFER_META(phi::Atan2InferMeta));
REGISTER_OPERATOR(atan2,
ops::Atan2Op,
ops::Atan2OpMaker,
ops::Atan2GradMaker<paddle::framework::OpDesc>,
ops::Atan2GradMaker<paddle::imperative::OpBase>,
ops::Atan2OpVarTypeInference,
Atan2InferShapeFunctor);
REGISTER_OPERATOR(atan2_grad, ops::Atan2GradOp);
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
using framework::OpKernelType;
using framework::Tensor;
class CholeskyOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
class CholeskyOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"(Tensor), The input tensor of cholesky op. Its shape should be "
"[*, M, M] where * is zero or more batch dimensions, and matrices "
"on the inner-most 2 dimensions all should be symmetric "
"positive-definite.");
AddOutput("Out",
"(Tensor), The output tensor of cholesky op. It has the same "
"shape as the input, and it is composed of upper-triangular or "
"lower-triangular Cholesky factors of each of the individual "
"matrices.");
AddAttr<bool>("upper",
"(bool, default false), flag indicating whether to return "
"upper or lower triangular matrices. Default: False")
.SetDefault(false);
AddComment(R"DOC(
Cholesky Operator.
Computes the Cholesky decomposition of one symmetric positive-definite matrix
or batches of symmetric positive-definite matrices.
)DOC");
}
};
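As a concrete instance of the decomposition described above, a hand-rolled 2x2 case (an illustrative sketch, not the kernel implementation):

#include <array>
#include <cmath>

// Unblocked Cholesky of a 2x2 SPD matrix A = {{a, b}, {b, c}}: returns the
// lower-triangular factor L = {{l00, 0}, {l10, l11}} with A = L * L^T,
// matching the upper == false behaviour described above.
std::array<double, 3> Cholesky2x2(double a, double b, double c) {
  const double l00 = std::sqrt(a);
  const double l10 = b / l00;
  const double l11 = std::sqrt(c - l10 * l10);
  return {l00, l10, l11};
}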
class CholeskyGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "CholeskyGrad");
OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")),
"Input",
"Out@GRAD",
"CholeskyGrad");
auto dims = ctx->GetInputDim("Out");
auto x_grad_name = framework::GradVarName("X");
if (ctx->HasOutput(x_grad_name)) {
ctx->SetOutputDim(x_grad_name, dims);
}
}
};
template <typename T>
class CholeskyGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType(this->ForwardOpType() + "_grad");
op->SetInput("Out", this->Output("Out"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(cholesky,
CholeskyInferShapeFunctor,
PD_INFER_META(phi::CholeskyInferMeta));
REGISTER_OPERATOR(cholesky,
ops::CholeskyOp,
ops::CholeskyOpMaker,
ops::CholeskyGradOpMaker<paddle::framework::OpDesc>,
ops::CholeskyGradOpMaker<paddle::imperative::OpBase>,
CholeskyInferShapeFunctor);
REGISTER_OPERATOR(cholesky_grad, ops::CholeskyGradOp);
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
class CholeskySolveOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddComment(R"DOC(Solves a linear system of equations with a positive "
"semidefinite matrix to be inverted given its Cholesky factor matrix uu."
")DOC");
AddInput("X", "(Tensor) The input tensor, shape of (*,m,k)");
AddInput("Y",
"(Tensor) The input tensor, shape of (*,m,m) composed of upper or "
"lower triangular Cholesky factor");
AddOutput("Out", "(Tensor) The output tensor, shape same to X");
AddAttr<bool>("upper",
"whether to consider the Cholesky factor "
"as a lower or upper triangular matrix")
.SetDefault(false);
}
};
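A numeric sketch of the lower-triangular case (upper == false), assuming the usual convention that Out solves (Y * Y^T) * Out = X: the solve reduces to two triangular substitutions, Y * z = X followed by Y^T * Out = z. The forward-substitution half, as an illustrative helper:

#include <vector>

// Forward substitution: solve L * z = b for a lower-triangular, row-major
// n x n matrix L. Back substitution with L^T completes the solve.
std::vector<double> ForwardSubst(const std::vector<double>& L,
                                 const std::vector<double>& b, int n) {
  std::vector<double> z(n);
  for (int i = 0; i < n; ++i) {
    double s = b[i];
    for (int j = 0; j < i; ++j) {
      s -= L[i * n + j] * z[j];
    }
    z[i] = s / L[i * n + i];
  }
  return z;
}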
class CholeskySolveOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "Y"), ctx.GetPlace());
}
};
class CholeskySolveOpVarTypeInference : public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext *ctx) const override {
auto var_type = ctx->GetInputType("Y", 0);
auto data_type = ctx->GetInputDataType("Y", 0);
ctx->SetOutputType("Out", var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType("Out", data_type, framework::ALL_ELEMENTS);
}
};
template <typename T>
class CholeskySolveOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("cholesky_solve_grad");
retv->SetInput("X", this->Input("X"));
retv->SetInput("Y", this->Input("Y"));
retv->SetInput("Out", this->Output("Out"));
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
retv->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
retv->SetAttrMap(this->Attrs());
}
};
class CholeskySolveGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "cholesky_solve");
OP_INOUT_CHECK(ctx->HasInput("Y"), "Input", "Y", "cholesky_solve");
OP_INOUT_CHECK(ctx->HasInput("Out"), "Input", "Out", "cholesky_solve");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
"Input",
"Out@GRAD",
"cholesky_solve");
auto x_dims = ctx->GetInputDim("X");
auto y_dims = ctx->GetInputDim("Y");
auto x_grad_name = framework::GradVarName("X");
auto y_grad_name = framework::GradVarName("Y");
if (ctx->HasOutput(x_grad_name)) {
ctx->SetOutputDim(x_grad_name, x_dims);
}
if (ctx->HasOutput(y_grad_name)) {
ctx->SetOutputDim(y_grad_name, y_dims);
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(cholesky_solve,
CholeskySolveInferShapeFunctor,
PD_INFER_META(phi::CholeskySolveInferMeta));
REGISTER_OPERATOR(cholesky_solve,
ops::CholeskySolveOp,
ops::CholeskySolveOpMaker,
ops::CholeskySolveOpVarTypeInference,
ops::CholeskySolveOpGradMaker<paddle::framework::OpDesc>,
ops::CholeskySolveOpGradMaker<paddle::imperative::OpBase>,
CholeskySolveInferShapeFunctor);
REGISTER_OPERATOR(cholesky_solve_grad, ops::CholeskySolveGradOp);
@@ -160,6 +160,10 @@ void KLDivInferMeta(const MetaTensor& x,
void Atan2InferMeta(const MetaTensor& x, const MetaTensor& y, MetaTensor* out) {
  out->share_meta(x);
  if (x.dtype() == DataType::INT32 || x.dtype() == DataType::INT64 ||
      y.dtype() == DataType::INT32 || y.dtype() == DataType::INT64) {
    out->set_dtype(DataType::FLOAT64);
  }
}
void BCELossInferMeta(const MetaTensor& input,
...
@@ -18,6 +18,26 @@
namespace phi {
/**
 * @brief Performs sorting on the input tensor along the given axis and outputs
 *        two tensors, Output(Out) and Output(Indices). Both preserve the same
 *        shape as Input(X); Output(Out) is the sorted tensor, while
 *        Output(Indices) gives the indices that produce that order along the
 *        given axis Attr(axis).
 * @param ctx device context
 * @param x The input of Argsort
 * @param axis The axis along which to sort the tensor. When axis < 0, the
 *             actual axis is axis + rank(x), i.e. counted backwards from the
 *             last dimension.
 * @param descending Whether to sort in descending (true) or ascending (false)
 *                   order.
 * @param out The sorted tensor of Argsort op, with the same shape as x
 * @param indices The indices of a tensor giving the sorted order, with the
 *                same shape as x
 */
template <typename T, typename Context>
void ArgsortKernel(const Context& dev_ctx,
                   const DenseTensor& input,
...
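A tiny standalone reference for the 1-D, ascending case mirrors this contract (illustrative sketch, not the Paddle kernel):

#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// 1-D reference: indices is the permutation that sorts x ascending
// (stable, so ties keep their original order); out is x reordered by it.
void ArgsortReference(const std::vector<float>& x,
                      std::vector<float>* out,
                      std::vector<int64_t>* indices) {
  indices->assign(x.size(), 0);
  std::iota(indices->begin(), indices->end(), 0);
  std::stable_sort(indices->begin(), indices->end(),
                   [&x](int64_t a, int64_t b) { return x[a] < x[b]; });
  out->resize(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    (*out)[i] = x[(*indices)[i]];
  }
}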
@@ -18,6 +18,21 @@ limitations under the License. */
namespace phi {
/**
 * @brief Computes the Cholesky decomposition of one symmetric
 *        positive-definite matrix or batches of symmetric positive-definite
 *        matrices.
 * @param ctx device context
 * @param x The input tensor of cholesky op. Its shape should be [*, M, M]
 *          where * is zero or more batch dimensions, and matrices on the
 *          inner-most 2 dimensions all should be symmetric positive-definite
 * @param upper flag indicating whether to return upper or lower triangular
 *              matrices
 * @param out The output tensor of cholesky kernel. It has the same shape as
 *            the input, and it is composed of upper-triangular or
 *            lower-triangular Cholesky factors of each of the individual
 *            matrices
 */
template <typename T, typename Context>
void CholeskyKernel(const Context& dev_ctx,
                    const DenseTensor& x,
...
@@ -18,6 +18,17 @@
namespace phi {
/**
 * @brief Solves a linear system of equations with a positive semidefinite
 *        matrix to be inverted, given its Cholesky factor matrix u
 * @param ctx device context
 * @param x The input tensor, shape of (*, m, k)
 * @param y The input tensor, shape of (*, m, m), composed of upper or lower
 *          triangular Cholesky factor
 * @param upper whether to consider the Cholesky factor as a lower or upper
 *              triangular matrix
 * @param out The output tensor, with the same shape as x
 */
template <typename T, typename Context>
void CholeskySolveKernel(const Context& dev_ctx,
                         const DenseTensor& x,
...
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature Atan2GradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"atan2_grad", {"X1", "X2", "Out@GRAD"}, {}, {"X1@GRAD", "X2@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(atan2_grad, phi::Atan2GradOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature CholeskyGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"cholesky_grad", {"Out", "Out@GRAD"}, {"upper"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(cholesky_grad, phi::CholeskyGradOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature CholeskySolveGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("cholesky_solve_grad",
{"X", "Y", "Out", "Out@GRAD"},
{"upper"},
{"X@GRAD", "Y@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(cholesky_solve_grad,
phi::CholeskySolveGradOpArgumentMapping);
- api : atan2
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : Atan2InferMeta
  kernel :
    func : atan2
  backward : atan2_grad
- api : bernoulli
  args : (Tensor x)
  output : Tensor
@@ -6,6 +15,24 @@
  kernel :
    func : bernoulli
- api : cholesky
  args : (Tensor x, bool upper=false)
  output : Tensor
  infer_meta :
    func : CholeskyInferMeta
  kernel :
    func : cholesky
  backward : cholesky_grad
- api : cholesky_solve
  args : (Tensor x, Tensor y, bool upper=false)
  output : Tensor
  infer_meta :
    func : CholeskySolveInferMeta
  kernel :
    func : cholesky_solve
  backward : cholesky_solve_grad
- api : erf
  args : (Tensor x)
  output : Tensor
...
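Entries in this file feed the API code generator; as a rough sketch of what the three new entries surface (the generated signatures are an assumption inferred from the `args` fields, not shown in this diff):

// Hypothetical generated declarations; names mirror the yaml api/args fields.
PADDLE_API Tensor atan2(const Tensor& x, const Tensor& y);
PADDLE_API Tensor cholesky(const Tensor& x, bool upper = false);
PADDLE_API Tensor cholesky_solve(const Tensor& x,
                                 const Tensor& y,
                                 bool upper = false);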
- api : atan2
  inputs :
    x : X1
    y : X2
  outputs :
    out : Out
- api : trace
  inputs :
    x : Input
...
- backward_api : atan2_grad
  forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : atan2_grad
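For reference, the analytic gradients this backward kernel realizes follow from standard calculus (stated here for clarity; not part of the diff):

$$\frac{\partial\,\mathrm{atan2}(x,y)}{\partial x}=\frac{y}{x^{2}+y^{2}},\qquad \frac{\partial\,\mathrm{atan2}(x,y)}{\partial y}=\frac{-x}{x^{2}+y^{2}}$$

so x_grad = out_grad * y / (x^2 + y^2) and y_grad = -out_grad * x / (x^2 + y^2), each with the shape of the corresponding input (hence GeneralBinaryGradInferMeta over [x, y]).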
- backward_api : cholesky_grad
  forward : cholesky (Tensor x, bool upper) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : cholesky_grad
- backward_api : cholesky_solve_grad
  forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : cholesky_solve_grad
- backward_api : erf_grad
  forward : erf (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
...
@@ -152,9 +152,8 @@
  kernel :
    func : arg_min
# argsort
- api : argsort
  args : (Tensor x, int axis, bool descending)
  args : (Tensor x, int axis=-1, bool descending=false)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : ArgsortInferMeta
@@ -214,15 +213,6 @@
    func : atan
  backward : atan_grad
- api : atan2
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : Atan2InferMeta
  kernel :
    func : atan2
  backward : atan2_grad
# atanh
- api : atanh
  args : (Tensor x)
@@ -335,26 +325,6 @@
    func : celu
  backward : celu_grad
# cholesky
- api : cholesky
  args : (Tensor x, bool upper)
  output : Tensor
  infer_meta :
    func : CholeskyInferMeta
  kernel :
    func : cholesky
  backward : cholesky_grad
# cholesky_solve
- api : cholesky_solve
  args : (Tensor x, Tensor y, bool upper)
  output : Tensor
  infer_meta :
    func : CholeskySolveInferMeta
  kernel :
    func : cholesky_solve
  backward : cholesky_solve_grad
- api : clip
  args : (Tensor x, Scalar(float) min, Scalar(float) max)
  output : Tensor(out)
...
@@ -108,6 +108,7 @@
    param : [x]
  kernel :
    func : argsort_grad
    data_type : out_grad
  no_need_buffer : x
- backward_api : asin_grad
@@ -152,16 +153,6 @@
    func : assign
  inplace : (out_grad -> x_grad)
- backward_api : atan2_grad
  forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : atan2_grad
- backward_api : atan_grad
  forward : atan (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
@@ -278,26 +269,6 @@
  backward : celu_double_grad
  inplace : (out_grad -> x_grad)
- backward_api : cholesky_grad
  forward : cholesky (Tensor x, bool upper) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : cholesky_grad
- backward_api : cholesky_solve_grad
  forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : cholesky_solve_grad
- backward_api : clip_double_grad
  forward : clip_grad (Tensor x, Tensor grad_out, Scalar min = 0., Scalar max = 0.) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad, Scalar min = 0., Scalar max = 0.)
...