Unverified · Commit 8fa8e17e authored by zyfncg, committed by GitHub

Move apis(cross, diagonal) legacy_api.yaml to api.yaml (#43893)

* move cross form legacy_api.yaml to api.yaml

* move diagonal to api.yaml
Parent fb1a93a8
@@ -12,17 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/conj_op.h"
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif
#include "paddle/fluid/framework/infershape_utils.h" #include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h" #include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h" #include "paddle/phi/infermeta/unary.h"
@@ -75,14 +71,3 @@ REGISTER_OPERATOR(conj,
ops::ConjGradMaker<paddle::framework::OpDesc>,
ops::ConjGradMaker<paddle::imperative::OpBase>,
ConjInferShapeFunctor);
REGISTER_OP_CPU_KERNEL(
conj,
ops::ConjKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex<float>>,
ops::ConjKernel<paddle::platform::CPUDeviceContext,
paddle::platform::complex<double>>,
ops::ConjKernel<paddle::platform::CPUDeviceContext, float>,
ops::ConjKernel<paddle::platform::CPUDeviceContext, double>,
ops::ConjKernel<paddle::platform::CPUDeviceContext, int>,
ops::ConjKernel<paddle::platform::CPUDeviceContext, int64_t>);
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/operators/conj_op.h"
#include "paddle/fluid/platform/complex.h"
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
conj,
ops::ConjKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<float>>,
ops::ConjKernel<paddle::platform::CUDADeviceContext,
paddle::platform::complex<double>>,
ops::ConjKernel<paddle::platform::CUDADeviceContext, float>,
ops::ConjKernel<paddle::platform::CUDADeviceContext, double>,
ops::ConjKernel<paddle::platform::CUDADeviceContext, int>,
ops::ConjKernel<paddle::platform::CUDADeviceContext, int64_t>);
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
// only can include the headers in paddle/phi/api dirs
#include "paddle/phi/api/lib/utils/tensor_utils.h"
#include "paddle/phi/kernels/complex_kernel.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
template <typename DeviceContext, typename T>
class ConjKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
const Tensor* x = context.Input<Tensor>("X");
Tensor* out = context.Output<Tensor>("Out");
out->mutable_data<T>(context.GetPlace(), size_t(x->numel() * sizeof(T)));
auto& dev_ctx = context.device_context<DeviceContext>();
// call new kernel
phi::ConjKernel<T>(
static_cast<const typename paddle::framework::ConvertToPhiContext<
DeviceContext>::TYPE&>(dev_ctx),
*x,
out);
}
};
DECLARE_INPLACE_OP_INFERER(ConjOpInplaceInferer, {"X", "Out"});
} // namespace operators
} // namespace paddle
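Editor's note: `ConjKernel` above is now just a thin wrapper that forwards to `phi::ConjKernel`, so user-visible behavior is unchanged. A minimal sketch of the public API this backs, assuming a Paddle 2.x install with complex dtype support:

```python
import paddle

# paddle.conj returns the element-wise complex conjugate; for real dtypes
# (float/int) it is effectively an identity copy.
x = paddle.to_tensor([1 + 2j, 3 - 4j])
print(paddle.conj(x))  # [(1-2j), (3+4j)]
```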
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
using framework::DDim;
using framework::Tensor;
const int kDefaultDim = framework::DDim::kMaxRank;
class CrossOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.device_context());
}
};
class CrossGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"),
true,
platform::errors::InvalidArgument("Input(X) should not be null."));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Y"),
true,
platform::errors::InvalidArgument("Input(Y) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")),
true,
platform::errors::InvalidArgument(
"Input(Out@GRAD) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")),
true,
platform::errors::InvalidArgument(
"Output(X@GRAD) should not be null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("Y")),
true,
platform::errors::InvalidArgument(
"Output(Y@GRAD) should not be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
ctx->SetOutputDim(framework::GradVarName("Y"), ctx->GetInputDim("Y"));
auto x_dims = ctx->GetInputsDim("X");
auto y_dims = ctx->GetInputsDim("Y");
for (size_t i = 0; i < x_dims.size(); ++i) {
PADDLE_ENFORCE_EQ(x_dims[i],
y_dims[i],
phi::errors::InvalidArgument(
"The 'shape' of Input(X) should be equal to "
"the 'shape' of Input(Y). But received "
"Input(X).dimensions = [%s], "
"Input(Y).dimensions = [%s]",
x_dims[i],
y_dims[i]));
}
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context());
}
};
class CrossOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "(Tensor) the input tensor.");
AddInput("Y", "(Tensor) the second input tensor.");
AddOutput("Out", "(Tensor), the output tensor.");
AddAttr<int>("dim", "the dimension to take the cross-product in.")
.SetDefault(kDefaultDim);
AddComment(R"DOC(
Returns the cross product of vectors in dimension dim of
input and other. Input and other must have the same size,
and the size of their dim dimension should be 3.
If dim is not given, it defaults to the first dimension
found with the size 3.
)DOC");
}
};
template <typename T>
class CrossGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("cross_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(cross,
CrossInferShapeFunctor,
PD_INFER_META(phi::CrossInferMeta));
REGISTER_OPERATOR(cross,
ops::CrossOp,
ops::CrossOpMaker,
ops::CrossGradMaker<paddle::framework::OpDesc>,
ops::CrossGradMaker<paddle::imperative::OpBase>,
CrossInferShapeFunctor);
REGISTER_OPERATOR(cross_grad, ops::CrossGradOp);
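Editor's note: the registration above backs `paddle.cross`. The `dim` attribute's default, `kDefaultDim = DDim::kMaxRank` (9), is a sentinel meaning "use the first dimension of size 3", which is why the yaml entries below declare `int axis = 9`. A minimal usage sketch, assuming a Paddle 2.x install:

```python
import paddle

x = paddle.to_tensor([[1.0, 0.0, 0.0]])
y = paddle.to_tensor([[0.0, 1.0, 0.0]])

# Explicit axis: the size-3 dimension to take the cross product over.
print(paddle.cross(x, y, axis=1))  # [[0., 0., 1.]]

# Omitting axis falls back to the sentinel default: the first size-3 dimension.
print(paddle.cross(x, y))          # same result
```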
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class DiagonalOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
class DiagonalOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Input",
"(Tensor) The input tensor, from which the diagonals are taken.");
AddOutput(
"Out",
"(Tensor) The partial view of input with the its diagonal elements.");
AddAttr<int>(
"offset",
R"DOC((int, default 0), offset of the diagonal from the main diagonal. Can be both positive and negative. Default: 0.
)DOC")
.SetDefault(0);
AddAttr<int>(
"axis1",
R"DOC((int, default 0), the first axis of the 2-D planes from which the diagonals should be taken.
Can be either positive or negative. Default: 0.
)DOC")
.SetDefault(0);
AddAttr<int>(
"axis2",
R"DOC((int, default 1), the second axis of the 2-D planes from which the diagonals should be taken.
Can be either positive or negative. Default: 1.
)DOC")
.SetDefault(1);
AddComment(R"DOC(
Diagonal Operator.
Return a partial view of the input tensor containing its diagonal elements.
The behavior of this operator is similar to how `numpy.diagonal` works.
)DOC");
}
};
class DiagonalGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "DiagonalGrad");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Input")),
"Output",
framework::GradVarName("Input"),
"DiagonalGrad");
ctx->SetOutputDim(framework::GradVarName("Input"),
ctx->GetInputDim("Input"));
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.GetPlace());
}
};
template <typename T>
class DiagonalGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("diagonal_grad");
grad_op->SetInput("Input", this->Input("Input"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
grad_op->SetOutput(framework::GradVarName("Input"),
this->InputGrad("Input"));
grad_op->SetAttrMap(this->Attrs());
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(DiagonalGradNoNeedBufferVarsInferer,
"Input");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(diagonal,
DiagonalInferShapeFunctor,
PD_INFER_META(phi::DiagonalInferMeta));
REGISTER_OPERATOR(diagonal,
ops::DiagonalOp,
ops::DiagonalOpMaker,
ops::DiagonalGradOpMaker<paddle::framework::OpDesc>,
ops::DiagonalGradOpMaker<paddle::imperative::OpBase>,
DiagonalInferShapeFunctor);
REGISTER_OPERATOR(diagonal_grad,
ops::DiagonalGradOp,
ops::DiagonalGradNoNeedBufferVarsInferer)
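Editor's note: likewise, the registration above backs `paddle.diagonal`, which mirrors `numpy.diagonal`. A minimal sketch, assuming a Paddle 2.x install:

```python
import paddle

x = paddle.arange(9, dtype='float32').reshape([3, 3])

print(paddle.diagonal(x))             # main diagonal: [0., 4., 8.]
print(paddle.diagonal(x, offset=1))   # above the main diagonal: [1., 5.]
print(paddle.diagonal(x, offset=-1))  # below the main diagonal: [3., 7.]
```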
@@ -24,7 +24,6 @@
#include <unordered_map>
#include <vector>
#include "paddle/fluid/operators/conj_op.h"
#include "paddle/fluid/operators/spectral_op.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/platform/enforce.h"
...
@@ -25,7 +25,6 @@
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/operators/conj_op.h"
#include "paddle/fluid/operators/eigen/eigen_function.h"
#include "paddle/fluid/operators/transpose_op.h"
#include "paddle/fluid/platform/complex.h"
...
@@ -18,6 +18,18 @@
namespace phi {
/**
* @brief Returns the cross product of vectors in dimension dim of
* input and other. Input and other must have the same size,
* and the size of their dim dimension should be 3.
* If dim is not given, it defaults to the first dimension
* found with the size 3.
* @param dev_ctx device context
* @param x the input tensor
* @param y the second input tensor
* @param axis the dimension to take the cross-product in
* @param out the output tensor
*/
template <typename T, typename Context>
void CrossKernel(const Context& dev_ctx,
const DenseTensor& x,
...
@@ -18,6 +18,20 @@
namespace phi {
/**
* @brief Return a partial view of the input tensor containing its diagonal
*        elements. The behavior of this operator is similar to how
*        `numpy.diagonal` works.
* @param dev_ctx device context
* @param x the input tensor, from which the diagonals are taken
* @param offset offset of the diagonal from the main diagonal. Can be both
* positive and negative
* @param axis1 the first axis of the 2-D planes from which the diagonals
* should be taken. Can be either positive or negative
* @param axis2 the second axis of the 2-D planes from which the diagonals
* should be taken. Can be either positive or negative
* @param out the partial view of input with its diagonal elements
*/
template <typename T, typename Context>
void DiagonalKernel(const Context& dev_ctx,
const DenseTensor& x,
...
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature CrossOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("cross", {"X", "Y"}, {"dim"}, {"Out"});
}
KernelSignature CrossGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"cross_grad", {"X", "Y", "Out@GRAD"}, {"dim"}, {"X@GRAD", "Y@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(cross, phi::CrossOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(cross_grad, phi::CrossGradOpArgumentMapping);
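Editor's note: these argument mappings are what let the legacy static-graph (fluid) op dispatch to the new phi kernels: `X`/`Y`/`dim`/`Out` on the op side map to `x`/`y`/`axis`/`out` on the kernel side. A hedged sketch of the static-graph path this serves, assuming a Paddle 2.x install:

```python
import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data('X', shape=[2, 3], dtype='float32')
    y = paddle.static.data('Y', shape=[2, 3], dtype='float32')
    # Lowers to the legacy `cross` op; at execution time the mapping above
    # translates its X/Y/dim arguments into a phi `cross` kernel call.
    out = paddle.cross(x, y, axis=1)
```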
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature DiagonalGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("diagonal_grad",
{"Input", "Out@GRAD"},
{"offset", "axis1", "axis2"},
{"Input@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(diagonal_grad, phi::DiagonalGradOpArgumentMapping);
@@ -33,6 +33,25 @@
func : cholesky_solve
backward : cholesky_solve_grad
- api : cross
args : (Tensor x, Tensor y, int axis = 9)
output : Tensor
infer_meta :
func : CrossInferMeta
kernel :
func : cross
data_type : x
backward : cross_grad
- api : diagonal
args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1)
output : Tensor
infer_meta :
func : DiagonalInferMeta
kernel :
func : diagonal
backward : diagonal_grad
- api : erf
args : (Tensor x)
output : Tensor
...
@@ -5,6 +5,19 @@
outputs :
out : Out
- api : cross
inputs : {x : X, y : Y}
attrs :
axis : dim
outputs :
out : Out
- api : diagonal
inputs :
x : Input
outputs :
out : Out
- api : trace
inputs :
x : Input
...
@@ -28,6 +28,29 @@
kernel :
func : cholesky_solve_grad
- backward_api : cross_grad
forward : cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : cross_grad
data_type : out_grad
- backward_api : diagonal_grad
forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : diagonal_grad
data_type : out_grad
no_need_buffer : x
- backward_api : erf_grad
forward : erf (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
...
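Editor's note: the `cross_grad` and `diagonal_grad` entries above keep autograd working after the move; `no_need_buffer : x` records that `diagonal_grad` needs only the shape of `x`, not its data. A quick gradient check, assuming Paddle 2.x dygraph mode:

```python
import paddle

x = paddle.randn([2, 3])
y = paddle.randn([2, 3])
x.stop_gradient = False
y.stop_gradient = False

# Backward through the moved op dispatches the cross_grad kernel.
out = paddle.cross(x, y, axis=1)
out.sum().backward()
print(x.grad.shape, y.grad.shape)  # [2, 3] [2, 3]
```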
@@ -412,15 +412,6 @@
func : cosh
backward : cosh_grad
- api : cross
args : (Tensor x, Tensor y, int axis = 9)
output : Tensor
infer_meta :
func : CrossInferMeta
kernel :
func : cross
backward : cross_grad
# Part of python API paddle.nn.functional.cross_entropy
- api : cross_entropy_with_softmax
args : (Tensor input, Tensor label, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis)
@@ -500,15 +491,6 @@
kernel :
func : diag
- api : diagonal
args : (Tensor x, int offset, int axis1, int axis2)
output : Tensor
infer_meta :
func : DiagonalInferMeta
kernel :
func : diagonal
backward : diagonal_grad
- api : digamma
args : (Tensor x)
output : Tensor
...
@@ -425,16 +425,6 @@
data_type : softmax
inplace : (softmax -> input_grad)
- backward_api : cross_grad
forward : cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad, int axis)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : cross_grad
- backward_api : cumprod_grad
forward : cumprod (Tensor x, int dim) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad, int dim)
@@ -508,17 +498,6 @@
kernel :
func : determinant_grad
- backward_api : diagonal_grad
forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : diagonal_grad
no_need_buffer : x
- backward_api : digamma_grad
forward : digamma (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
...