Unverified commit ea4b2c5e, authored by freeliuzc, committed by GitHub

[phi] move inverse op from fluid to phi (#44471)

* move inverse from fluid to phi, with a unit test bug

* fix bug, add eager op yaml
Parent 8ccbb863
@@ -12,57 +12,23 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/inverse_op.h"
#include <string>
#include <unordered_map>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"
namespace paddle {
namespace operators {
class InverseOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "Inverse");
OP_INOUT_CHECK(ctx->HasOutput("Output"), "Output", "Output", "Inverse");
auto input_dims = ctx->GetInputDim("Input");
int64_t input_rank = input_dims.size();
PADDLE_ENFORCE_GE(
input_rank,
2,
platform::errors::InvalidArgument(
"The dimension of Input(Input) is expected to be no less than 2. "
"But received: Input(Input)'s dimension = %d, shape = [%s].",
input_rank,
input_dims));
for (int64_t i = 0; i < input_rank; ++i) {
PADDLE_ENFORCE_EQ(
(input_dims[i] == -1) || (input_dims[i] > 0),
true,
platform::errors::InvalidArgument(
"Each dimension of input tensor is expected to be -1 or a "
"positive number, but received %d. Input's shape is [%s].",
input_dims[i],
input_dims));
}
if (input_dims[input_rank - 2] > 0 && input_dims[input_rank - 1] > 0) {
PADDLE_ENFORCE_EQ(input_dims[input_rank - 2],
input_dims[input_rank - 1],
platform::errors::InvalidArgument(
"The last two dimensions are expected to be equal. "
"But received: %d and %d; "
"Input(Input)'s shape = [%s].",
input_dims[input_rank - 2],
input_dims[input_rank - 1],
input_dims));
}
ctx->SetOutputDim("Output", input_dims);
ctx->ShareLoD("Input", /*->*/ "Output");
}
};
class InverseOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
@@ -78,19 +44,6 @@ class InverseOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
class InverseGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
auto input_grad = framework::GradVarName("Input");
auto output_grad = framework::GradVarName("Output");
OP_INOUT_CHECK(ctx->HasInput("Output"), "Input", "Output", "InverseGrad");
OP_INOUT_CHECK(
ctx->HasInput(output_grad), "Input", output_grad, "InverseGrad");
if (ctx->HasOutput(input_grad)) {
ctx->SetOutputDim(input_grad, ctx->GetInputDim(output_grad));
}
}
};
class InverseOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -128,18 +81,23 @@ class InverseGradOpMaker : public framework::SingleGradOpMaker<T> {
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(inverse,
InverseInferShapeFunctor,
PD_INFER_META(phi::InverseInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(inverse_grad,
InverseGradInferShapeFunctor,
PD_INFER_META(phi::InverseGradInferMeta));
REGISTER_OPERATOR(inverse,
ops::InverseOp,
ops::InverseOpMaker,
ops::InverseOpInferVarType,
ops::InverseGradOpMaker<paddle::framework::OpDesc>,
ops::InverseGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(inverse_grad, ops::InverseGradOp);
ops::InverseGradOpMaker<paddle::imperative::OpBase>,
InverseInferShapeFunctor);
REGISTER_OP_CPU_KERNEL(inverse,
ops::InverseKernel<phi::CPUContext, float>,
ops::InverseKernel<phi::CPUContext, double>);
REGISTER_OP_CPU_KERNEL(inverse_grad,
ops::InverseGradKernel<phi::CPUContext, float>,
ops::InverseGradKernel<phi::CPUContext, double>);
REGISTER_OPERATOR(inverse_grad,
ops::InverseGradOp,
InverseGradInferShapeFunctor);
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/inverse_op.h"
namespace ops = paddle::operators;
REGISTER_OP_CUDA_KERNEL(
inverse,
ops::InverseKernel<paddle::platform::CUDADeviceContext, float>,
ops::InverseKernel<paddle::platform::CUDADeviceContext, double>);
REGISTER_OP_CUDA_KERNEL(
inverse_grad,
ops::InverseGradKernel<paddle::platform::CUDADeviceContext, float>,
ops::InverseGradKernel<paddle::platform::CUDADeviceContext, double>);
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"
namespace paddle {
namespace operators {
template <typename DeviceContext, typename T>
class InverseKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* input = context.Input<framework::Tensor>("Input");
auto* output = context.Output<framework::Tensor>("Output");
output->mutable_data<T>(context.GetPlace());
auto& dev_ctx = context.template device_context<DeviceContext>();
phi::funcs::MatrixInverseFunctor<DeviceContext, T> mat_inv;
mat_inv(dev_ctx, *input, output);
}
};
template <typename DeviceContext, typename T>
class InverseGradKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& context) const override {
auto* a_inv = context.Input<framework::Tensor>("Output");
auto* a_inv_grad =
context.Input<framework::Tensor>(framework::GradVarName("Output"));
auto* a_grad =
context.Output<framework::Tensor>(framework::GradVarName("Input"));
if (a_grad) {
a_grad->mutable_data<T>(context.GetPlace());
auto blas = phi::funcs::GetBlas<DeviceContext, T>(context);
auto& dev_ctx = context.template device_context<DeviceContext>();
framework::Tensor tmp_out =
context.AllocateTmpTensor<T, DeviceContext>(a_inv->dims(), dev_ctx);
auto mat_dim_a0 =
phi::funcs::CreateMatrixDescriptor(a_inv_grad->dims(), 0, false);
auto mat_dim_b0 =
phi::funcs::CreateMatrixDescriptor(a_inv->dims(), 0, true);
blas.MatMul(
*a_inv_grad, mat_dim_a0, *a_inv, mat_dim_b0, T(1), &tmp_out, T(0));
auto mat_dim_a1 =
phi::funcs::CreateMatrixDescriptor(a_inv->dims(), 0, true);
auto mat_dim_b1 =
phi::funcs::CreateMatrixDescriptor(tmp_out.dims(), 0, false);
blas.MatMul(*a_inv, mat_dim_a1, tmp_out, mat_dim_b1, T(-1), a_grad, T(0));
}
}
};
} // namespace operators
} // namespace paddle
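For reference, the two MatMul calls in InverseGradKernel implement the standard adjoint of the matrix inverse. With Out = X^{-1} and the differential d(Out) = -Out · dX · Out, the gradient is

\frac{\partial L}{\partial X} = -\,Out^{\top} \cdot \frac{\partial L}{\partial Out} \cdot Out^{\top}

The first MatMul forms tmp_out = (∂L/∂Out) · Outᵀ via the transposed descriptor mat_dim_b0, and the second left-multiplies by Outᵀ with the scale T(-1).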
@@ -1042,6 +1042,15 @@
intermediate : saved_mean, saved_variance
backward : instance_norm_grad
- api : inverse
args : (Tensor x)
output : Tensor(out)
infer_meta :
func : InverseInferMeta
kernel :
func : inverse
backward : inverse_grad
# is_empty
- api : is_empty
args : (Tensor x)
@@ -967,6 +967,15 @@
optional : scale
backward : instance_norm_double_grad
- backward_api : inverse_grad
forward : inverse(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : InverseGradInferMeta
kernel :
func : inverse_grad
- backward_api : kldiv_loss_grad
forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
@@ -403,6 +403,14 @@ void InstanceNormDoubleGradInferMeta(const MetaTensor& x,
}
}
void InverseGradInferMeta(const MetaTensor& out,
const MetaTensor& dout,
MetaTensor* dx) {
if (dx) {
dx->set_dims(dout.dims());
}
}
void KernelWithXShapeInferMeta(const MetaTensor& xshape, MetaTensor* dx) {
auto xshape_dims = xshape.dims();
auto x_dims = phi::slice_ddim(xshape_dims, 1, xshape_dims.size());
@@ -183,6 +183,10 @@ void InstanceNormDoubleGradInferMeta(const MetaTensor& x,
MetaTensor* dscale,
MetaTensor* ddy);
void InverseGradInferMeta(const MetaTensor& out,
const MetaTensor& dout,
MetaTensor* dx);
void KernelWithXShapeInferMeta(const MetaTensor& xshape, MetaTensor* dx);
void MaxPoolWithIndexGradInferMeta(const MetaTensor& x,
@@ -1025,6 +1025,43 @@ void InferMetaFromVecValue(const MetaTensor& x,
}
}
void InverseInferMeta(const MetaTensor& x, MetaTensor* out) {
auto input_dims = x.dims();
int64_t input_rank = input_dims.size();
PADDLE_ENFORCE_GE(
input_rank,
2,
errors::InvalidArgument(
"The dimension of Input(Input) is expected to be no less than 2. "
"But received: Input(Input)'s dimension = %d, shape = [%s].",
input_rank,
input_dims));
for (int64_t i = 0; i < input_rank; ++i) {
PADDLE_ENFORCE_EQ(
(input_dims[i] == -1) || (input_dims[i] > 0),
true,
errors::InvalidArgument(
"Each dimension of input tensor is expected to be -1 or a "
"positive number, but received %d. Input's shape is [%s].",
input_dims[i],
input_dims));
}
if (input_dims[input_rank - 2] > 0 && input_dims[input_rank - 1] > 0) {
PADDLE_ENFORCE_EQ(input_dims[input_rank - 2],
input_dims[input_rank - 1],
errors::InvalidArgument(
"The last two dimensions are expected to be equal. "
"But received: %d and %d; "
"Input(Input)'s shape = [%s].",
input_dims[input_rank - 2],
input_dims[input_rank - 1],
input_dims));
}
out->set_dims(input_dims);
out->share_lod(x);
}
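To make the checks above concrete, here is a hypothetical Python mirror of InverseInferMeta's shape logic (the helper name is invented for illustration; -1 denotes an unknown dimension):

def infer_inverse_out_shape(shape):
    # Hypothetical sketch of phi::InverseInferMeta; not part of this commit.
    if len(shape) < 2:
        raise ValueError("input rank must be at least 2, got %d" % len(shape))
    for d in shape:
        if not (d == -1 or d > 0):
            raise ValueError("each dim must be -1 or positive, got %d" % d)
    if shape[-2] > 0 and shape[-1] > 0 and shape[-2] != shape[-1]:
        raise ValueError("last two dims must be equal, got %d and %d"
                         % (shape[-2], shape[-1]))
    return list(shape)  # the output shape equals the input shape

infer_inverse_out_shape([8, 4, 4])  # batched 4x4 matrices -> [8, 4, 4]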
void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out) {
out->set_dims(phi::make_ddim({1}));
out->set_dtype(DataType::BOOL);
@@ -146,6 +146,8 @@ void InferMetaFromVecValue(const MetaTensor& x,
const std::vector<int64_t>& shape,
MetaTensor* out);
void InverseInferMeta(const MetaTensor& x, MetaTensor* out);
void IsEmptyInferMeta(const MetaTensor& x, MetaTensor* out);
void IsfiniteInferMeta(const MetaTensor& input, MetaTensor* out);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/impl/inverse_grad_kernel_impl.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(
inverse_grad, CPU, ALL_LAYOUT, phi::InverseGradKernel, float, double) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/impl/inverse_kernel_impl.h"
#include "paddle/phi/core/kernel_registry.h"
PD_REGISTER_KERNEL(
inverse, CPU, ALL_LAYOUT, phi::InverseKernel, float, double) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/inverse_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/inverse_grad_kernel_impl.h"
PD_REGISTER_KERNEL(
inverse_grad, GPU, ALL_LAYOUT, phi::InverseGradKernel, float, double) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/inverse_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/inverse_kernel_impl.h"
PD_REGISTER_KERNEL(
inverse, GPU, ALL_LAYOUT, phi::InverseKernel, float, double) {}
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/inverse_grad_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"
namespace phi {
template <typename T, typename Context>
void InverseGradKernel(const Context& dev_ctx,
const DenseTensor& out,
const DenseTensor& out_grad,
DenseTensor* in_grad) {
if (in_grad) {
dev_ctx.template Alloc<T>(in_grad);
auto blas = phi::funcs::GetBlas<Context, T>(dev_ctx);
DenseTensor tmp_out;
tmp_out.Resize(out.dims());
dev_ctx.template Alloc<T>(&tmp_out);
auto mat_dim_a0 =
phi::funcs::CreateMatrixDescriptor(out_grad.dims(), 0, false);
auto mat_dim_b0 = phi::funcs::CreateMatrixDescriptor(out.dims(), 0, true);
blas.MatMul(out_grad, mat_dim_a0, out, mat_dim_b0, T(1), &tmp_out, T(0));
auto mat_dim_a1 = phi::funcs::CreateMatrixDescriptor(out.dims(), 0, true);
auto mat_dim_b1 =
phi::funcs::CreateMatrixDescriptor(tmp_out.dims(), 0, false);
blas.MatMul(out, mat_dim_a1, tmp_out, mat_dim_b1, T(-1), in_grad, T(0));
}
}
} // namespace phi
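A quick NumPy sanity check of the formula implemented above, as a standalone sketch (not part of the commit): for L = sum(dOut ⊙ inv(A)), the analytic gradient -Outᵀ · dOut · Outᵀ should match finite differences.

import numpy as np

rng = np.random.default_rng(0)
a = rng.standard_normal((4, 4)) + 4.0 * np.eye(4)  # well-conditioned test matrix
dout = rng.standard_normal((4, 4))                 # upstream gradient dL/dOut
out = np.linalg.inv(a)
grad_analytic = -out.T @ dout @ out.T              # what the kernel computes

eps = 1e-6
base = np.sum(dout * out)
grad_fd = np.zeros_like(a)
for i in range(4):
    for j in range(4):
        a_pert = a.copy()
        a_pert[i, j] += eps
        grad_fd[i, j] = (np.sum(dout * np.linalg.inv(a_pert)) - base) / eps

assert np.allclose(grad_analytic, grad_fd, atol=1e-4)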
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/kernels/inverse_kernel.h"
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"
namespace phi {
template <typename T, typename Context>
void InverseKernel(const Context& dev_ctx,
const DenseTensor& x,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
phi::funcs::MatrixInverseFunctor<Context, T> mat_inv;
mat_inv(dev_ctx, x, out);
}
} // namespace phi
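MatrixInverseFunctor is expected to invert the trailing two dimensions and treat any leading dimensions as a batch, matching batched test shapes such as [8, 4, 4] below; a NumPy analogue of that behavior (an assumption for illustration, not Paddle code):

import numpy as np

x = np.stack([(i + 1.0) * np.eye(3) for i in range(8)])  # shape (8, 3, 3)
out = np.linalg.inv(x)  # inverts each 3x3 slice independently
assert out.shape == x.shape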
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/core/dense_tensor.h"
namespace phi {
template <typename T, typename Context>
void InverseGradKernel(const Context& dev_ctx,
const DenseTensor& out,
const DenseTensor& out_grad,
DenseTensor* in_grad);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/kernels/funcs/matrix_inverse.h"
namespace phi {
template <typename T, typename Context>
void InverseKernel(const Context& dev_ctx,
const DenseTensor& x,
DenseTensor* out);
} // namespace phi
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature InverseGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"inverse_grad", {"Output", "Output@GRAD"}, {}, {"Input@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(inverse_grad, phi::InverseGradOpArgumentMapping);
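The mapping above bridges the legacy fluid variable names to the phi kernel arguments (out, out_grad, x_grad in the yaml entries earlier in this diff); a hypothetical Python rendering of the same table, for illustration only:

# Hypothetical illustration of the fluid -> phi name mapping above.
inverse_grad_arg_mapping = {
    "inputs":  {"Output": "out", "Output@GRAD": "out_grad"},
    "outputs": {"Input@GRAD": "x_grad"},
}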
@@ -25,6 +25,7 @@ class TestInverseOp(OpTest):
def config(self):
self.matrix_shape = [10, 10]
self.dtype = "float64"
self.python_api = paddle.tensor.math.inverse
def setUp(self):
self.op_type = "inverse"
@@ -38,10 +39,10 @@ class TestInverseOp(OpTest):
self.outputs = {'Output': inverse}
def test_check_output(self):
self.check_output()
self.check_output(check_eager=True)
def test_grad(self):
self.check_grad(['Input'], 'Output')
self.check_grad(['Input'], 'Output', check_eager=True)
class TestInverseOpBatched(TestInverseOp):
@@ -49,6 +50,7 @@ class TestInverseOpBatched(TestInverseOp):
def config(self):
self.matrix_shape = [8, 4, 4]
self.dtype = "float64"
self.python_api = paddle.tensor.math.inverse
class TestInverseOpLarge(TestInverseOp):
@@ -56,9 +58,13 @@ class TestInverseOpLarge(TestInverseOp):
def config(self):
self.matrix_shape = [32, 32]
self.dtype = "float64"
self.python_api = paddle.tensor.math.inverse
def test_grad(self):
self.check_grad(['Input'], 'Output', max_relative_error=1e-6)
self.check_grad(['Input'],
'Output',
max_relative_error=1e-6,
check_eager=True)
class TestInverseOpFP32(TestInverseOp):
@@ -66,9 +72,13 @@ class TestInverseOpFP32(TestInverseOp):
def config(self):
self.matrix_shape = [10, 10]
self.dtype = "float32"
self.python_api = paddle.tensor.math.inverse
def test_grad(self):
self.check_grad(['Input'], 'Output', max_relative_error=1e-2)
self.check_grad(['Input'],
'Output',
max_relative_error=1e-2,
check_eager=True)
class TestInverseOpBatchedFP32(TestInverseOpFP32):
@@ -76,6 +86,7 @@ class TestInverseOpBatchedFP32(TestInverseOpFP32):
def config(self):
self.matrix_shape = [8, 4, 4]
self.dtype = "float32"
self.python_api = paddle.tensor.math.inverse
class TestInverseOpLargeFP32(TestInverseOpFP32):
@@ -83,6 +94,7 @@ class TestInverseOpLargeFP32(TestInverseOpFP32):
def config(self):
self.matrix_shape = [32, 32]
self.dtype = "float32"
self.python_api = paddle.tensor.math.inverse
class TestInverseAPI(unittest.TestCase):
@@ -1932,7 +1932,9 @@ def inverse(x, name=None):
print(inv) # [[0.5, 0], [0, 0.5]]
"""
if paddle.in_dynamic_mode():
if in_dygraph_mode():
return _C_ops.final_state_inverse(x)
elif paddle.in_dynamic_mode():
return _C_ops.inverse(x)
def _check_input(x):
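End to end, the new eager path can be exercised like the docstring example above; a minimal usage sketch:

import paddle

x = paddle.to_tensor([[2.0, 0.0], [0.0, 2.0]])
inv = paddle.inverse(x)  # dispatches to _C_ops.final_state_inverse in dygraph mode
print(inv)  # [[0.5, 0.0], [0.0, 0.5]]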