diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index eafb8ade9e53b5a84aed910c103e83fc2f723a5a..6a48378dc29d8a07c330bcfb5841347c5ecd1260 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -150,5 +150,19 @@ extern inline bool IsComplexType(const proto::VarType::Type type) { extern proto::VarType::Type PromoteTypesIfComplexExists( const proto::VarType::Type type_a, const proto::VarType::Type type_b); +extern inline proto::VarType::Type ToComplexType(proto::VarType::Type t) { + switch (t) { + case proto::VarType::FP32: + return proto::VarType::COMPLEX64; + case proto::VarType::FP64: + return proto::VarType::COMPLEX128; + default: + PADDLE_THROW(platform::errors::Unimplemented( + "Unknown complex value data type (%s), now only support float32 and " + "float64.", + DataTypeToString(t))); + } +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc index 9f5d8d30c9cdea0cfa9db1793e18874bcd42aae0..f721caaae9c7d9943b9ebabc7bbea0bcca559352 100644 --- a/paddle/fluid/framework/tensor.cc +++ b/paddle/fluid/framework/tensor.cc @@ -60,7 +60,7 @@ void* Tensor::mutable_data(const platform::Place& place, requested_size, size, platform::errors::InvalidArgument( "The requested memory size is less than the memory size of Tensor. " - "But received requested memory size is d%, " + "But received requested memory size is %d, " "memory size of Tensor is %d.", requested_size, size)); size = requested_size; diff --git a/paddle/fluid/operators/conj_op.cc b/paddle/fluid/operators/conj_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..3afe4f1e3d1027ce37404544dcd0929cc41cb6a3 --- /dev/null +++ b/paddle/fluid/operators/conj_op.cc @@ -0,0 +1,87 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/conj_op.h" + +#include +#include +#include +#include +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + +namespace paddle { +namespace operators { + +class ConjOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "conj"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "conj"); + + auto in_dims = ctx->GetInputDim("X"); + + ctx->SetOutputDim("Out", in_dims); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class ConjOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of conj op."); + AddOutput("Out", "(Tensor), The output tensor of conj op."); + AddComment(R"DOC( +Conj Operator. + +This operator is used to perform elementwise conjugate for input $X$. 
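+For each complex element $x = a + bi$ of $X$, the result is the complex conjugate
+$\bar{x} = a - bi$; for the real-valued types that are also registered (float32, float64,
+int32, int64) the output simply equals the input.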
+ +)DOC"); + } +}; + +template +class ConjGradMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + + void Apply(GradOpPtr retv) const override { + retv->SetType("conj"); + retv->SetInput("X", this->OutputGrad("Out")); + retv->SetAttrMap(this->Attrs()); + retv->SetOutput("Out", this->InputGrad("X")); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(conj, ops::ConjOp, ops::ConjOpMaker, + ops::ConjGradMaker, + ops::ConjGradMaker); + +REGISTER_OP_CPU_KERNEL( + conj, ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel); diff --git a/paddle/fluid/operators/conj_op.cu b/paddle/fluid/operators/conj_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..601caeb50558876b972014813ca6dc247aecfeba --- /dev/null +++ b/paddle/fluid/operators/conj_op.cu @@ -0,0 +1,28 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/conj_op.h" +#include "paddle/fluid/platform/complex128.h" +#include "paddle/fluid/platform/complex64.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + conj, ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel, + ops::ConjKernel); diff --git a/paddle/fluid/operators/conj_op.h b/paddle/fluid/operators/conj_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0bec7b707e3692662da17c1a592fc12a76f4a293 --- /dev/null +++ b/paddle/fluid/operators/conj_op.h @@ -0,0 +1,85 @@ +// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { +using Tensor = framework::Tensor; + +template +using EnableComplex = + typename std::enable_if::value || + std::is_same::value>::type; + +template +using DisableComplex = typename std::enable_if< + !std::is_same::value && + !std::is_same::value>::type; + +template +struct ConjFunctor; + +template +struct ConjFunctor> { + ConjFunctor(const T* input, int64_t numel, T* output) + : input_(input), numel_(numel), output_(output) {} + + HOSTDEVICE void operator()(size_t idx) const { + output_[idx] = T(input_[idx].real, -input_[idx].imag); + } + const T* input_; + int64_t numel_; + T* output_; +}; + +template +struct ConjFunctor> { + ConjFunctor(const T* input, int64_t numel, T* output) + : input_(input), numel_(numel), output_(output) {} + + HOSTDEVICE void operator()(size_t idx) const { output_[idx] = input_[idx]; } + const T* input_; + int64_t numel_; + T* output_; +}; + +template +class ConjKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* x = context.Input("X"); + Tensor* out = context.Output("Out"); + + auto numel = x->numel(); + auto* x_data = x->data(); + auto* out_data = out->mutable_data(context.GetPlace(), + size_t(x->numel() * sizeof(T))); + + auto& dev_ctx = context.template device_context(); + platform::ForRange for_range(dev_ctx, numel); + ConjFunctor functor(x_data, numel, out_data); + for_range(functor); + } +}; + +DECLARE_INPLACE_OP_INFERER(ConjOpInplaceInferer, {"X", "Out"}); + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index cc85c295965ba484943833a8a094bca1bd4fc867..aac0337fe307bc6451c012e7575ff2a1ad8df9d7 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -143,7 +143,9 @@ REGISTER_OP_CPU_KERNEL(fill_constant, ops::FillConstantKernel, ops::FillConstantKernel, ops::FillConstantKernel, ops::FillConstantKernel, - ops::FillConstantKernel); + ops::FillConstantKernel, + ops::FillConstantKernel, + ops::FillConstantKernel); REGISTER_OP_VERSION(fill_constant) .AddCheckpoint( diff --git a/paddle/fluid/operators/fill_constant_op.cu.cc b/paddle/fluid/operators/fill_constant_op.cu.cc index 4a7b0110a1d965332471791951ab09bf4c998370..78c62a4053b6413a892ad761618bfc787b1b9609 100644 --- a/paddle/fluid/operators/fill_constant_op.cu.cc +++ b/paddle/fluid/operators/fill_constant_op.cu.cc @@ -20,4 +20,6 @@ REGISTER_OP_CUDA_KERNEL(fill_constant, ops::FillConstantKernel, ops::FillConstantKernel, ops::FillConstantKernel, ops::FillConstantKernel, - ops::FillConstantKernel); + ops::FillConstantKernel, + ops::FillConstantKernel, + ops::FillConstantKernel); diff --git a/paddle/fluid/operators/fill_constant_op_xpu.cc b/paddle/fluid/operators/fill_constant_op_xpu.cc index 2bf836272a400d6b57a5fe3bde23af45b53d4503..16dd4c9292f89a05d58cfc1d821c5a43f45f5add 100644 --- a/paddle/fluid/operators/fill_constant_op_xpu.cc +++ b/paddle/fluid/operators/fill_constant_op_xpu.cc @@ -19,5 +19,7 @@ REGISTER_OP_XPU_KERNEL(fill_constant, ops::FillConstantKernel, ops::FillConstantKernel, ops::FillConstantKernel, ops::FillConstantKernel, - ops::FillConstantKernel); + ops::FillConstantKernel, + ops::FillConstantKernel, + 
ops::FillConstantKernel); #endif diff --git a/paddle/fluid/operators/imag_op.cc b/paddle/fluid/operators/imag_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..899025ae7093b45833805687c9d499e2d1fa02e7 --- /dev/null +++ b/paddle/fluid/operators/imag_op.cc @@ -0,0 +1,106 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/imag_op.h" + +namespace paddle { +namespace operators { + +class ImagOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Imag"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Imag"); + + auto x_dims = ctx->GetInputDim("X"); + ctx->SetOutputDim("Out", x_dims); + ctx->ShareLoD("X", "Out"); + } +}; + +class ImagOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of imag op."); + AddOutput("Out", "(Tensor), The output tensor of imag op."); + AddComment(R"DOC( +Imag Operator. + +This operator is used to get a new tensor containing imaginary values +from a tensor with complex data type. 
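+For each complex element $x = a + bi$ of $X$, the corresponding output element is the
+real-valued imaginary part $b$, so a complex64 input yields a float32 output and a
+complex128 input yields a float64 output.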
+ +)DOC"); + } +}; + +class ImagGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + "Out@Grad", "ImagGrad"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + "X@Grad", "ImagGrad"); + + auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out")); + ctx->SetOutputDim(framework::GradVarName("X"), dout_dims); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + auto dtype = OperatorWithKernel::IndicateVarDataType( + ctx, framework::GradVarName("Out")); + auto complex_dtype = framework::ToComplexType(dtype); + return framework::OpKernelType(complex_dtype, ctx.GetPlace()); + } +}; + +template +class ImagGradOpMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + void Apply(GradOpPtr grad_op) const override { + grad_op->SetType("imag_grad"); + grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + } +}; + +DECLARE_INPLACE_OP_INFERER(ImagOpInplaceInferer, {"X", "Out"}); +DECLARE_INPLACE_OP_INFERER(ImagGradOpInplaceInferer, + {framework::GradVarName("Out"), + framework::GradVarName("X")}); + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(imag, ops::ImagOp, ops::ImagOpMaker, + ops::ImagGradOpMaker, + ops::ImagGradOpMaker); +REGISTER_OPERATOR(imag_grad, ops::ImagGradOp); + +REGISTER_OP_CPU_KERNEL(imag, ops::ImagKernel, + ops::ImagKernel); +REGISTER_OP_CPU_KERNEL(imag_grad, + ops::ImagGradKernel, + ops::ImagGradKernel); diff --git a/paddle/fluid/operators/imag_op.cu b/paddle/fluid/operators/imag_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..a7a3b1368219891dc5d98e25f4c38be5ad216baf --- /dev/null +++ b/paddle/fluid/operators/imag_op.cu @@ -0,0 +1,28 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/imag_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL(imag, + ops::ImagKernel, + ops::ImagKernel); +REGISTER_OP_CUDA_KERNEL(imag_grad, + ops::ImagGradKernel, + ops::ImagGradKernel); diff --git a/paddle/fluid/operators/imag_op.h b/paddle/fluid/operators/imag_op.h new file mode 100644 index 0000000000000000000000000000000000000000..562a8dffa90623ed44c51ff1048c25550f5a7ce7 --- /dev/null +++ b/paddle/fluid/operators/imag_op.h @@ -0,0 +1,66 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/complex_functors.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { + +template +class ImagKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + const framework::Tensor* x = ctx.Input("X"); + framework::Tensor* out = ctx.Output("Out"); + + auto numel = x->numel(); + auto* x_data = x->data(); + auto* out_data = out->mutable_data>( + ctx.GetPlace(), static_cast(numel * sizeof(math::Real))); + + auto& dev_ctx = ctx.template device_context(); + platform::ForRange for_range(dev_ctx, numel); + math::ImagFunctor functor(x_data, out_data, numel); + for_range(functor); + } +}; + +template +class ImagGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + const framework::Tensor* d_out = + ctx.Input(framework::GradVarName("Out")); + framework::Tensor* d_x = + ctx.Output(framework::GradVarName("X")); + + auto numel = d_out->numel(); + auto* dout_data = d_out->data>(); + auto* dx_data = d_x->mutable_data( + ctx.GetPlace(), static_cast(numel * sizeof(T))); + + auto& dev_ctx = ctx.template device_context(); + platform::ForRange for_range(dev_ctx, numel); + math::ImagToComplexFunctor functor(dout_data, dx_data, numel); + for_range(functor); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/complex_functors.h b/paddle/fluid/operators/math/complex_functors.h new file mode 100644 index 0000000000000000000000000000000000000000..302e3d562c65bbbc1e18c61ee1f55fe9dba016a4 --- /dev/null +++ b/paddle/fluid/operators/math/complex_functors.h @@ -0,0 +1,140 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include + +#include "paddle/fluid/platform/complex128.h" +#include "paddle/fluid/platform/complex64.h" +#include "paddle/fluid/platform/hostdevice.h" + +namespace paddle { +namespace operators { +namespace math { + +template +struct cond { + static constexpr bool value = B; + using type = T; +}; + +template +struct eval_if { + using type = typename TrueF::type; +}; + +template +struct eval_if { + using type = typename FalseF::type; +}; + +template +using eval_if_t = typename eval_if::type; + +template +struct select { + using type = eval_if_t>; +}; + +template +using select_t = typename select::type; + +template +using Real = + select_t::value, float>, + cond::value, double>, T>; + +template +using Complex = typename std::enable_if::value>::type; + +// There are no NoComplex cases now, implement later if needed +template +using NoComplex = typename std::enable_if::value>::type; + +template +struct RealFunctor; + +template +struct RealFunctor>> { + public: + RealFunctor(const T* input, Real* output, int64_t numel) + : input_(input), output_(output), numel_(numel) {} + + HOSTDEVICE void operator()(int64_t idx) const { + output_[idx] = input_[idx].real; + } + + private: + const T* input_; + Real* output_; + int64_t numel_; +}; + +template +struct ImagFunctor; + +template +struct ImagFunctor>> { + ImagFunctor(const T* input, Real* output, int64_t numel) + : input_(input), output_(output), numel_(numel) {} + + HOSTDEVICE void operator()(int64_t idx) const { + output_[idx] = input_[idx].imag; + } + + const T* input_; + Real* output_; + int64_t numel_; +}; + +template +struct RealToComplexFunctor; + +template +struct RealToComplexFunctor>> { + RealToComplexFunctor(const Real* input, T* output, int64_t numel) + : input_(input), output_(output), numel_(numel) {} + + HOSTDEVICE void operator()(int64_t idx) const { + output_[idx].real = input_[idx]; + output_[idx].imag = 0; + } + + const Real* input_; + T* output_; + int64_t numel_; +}; + +template +struct ImagToComplexFunctor; + +template +struct ImagToComplexFunctor>> { + ImagToComplexFunctor(const Real* input, T* output, int64_t numel) + : input_(input), output_(output), numel_(numel) {} + + HOSTDEVICE void operator()(int64_t idx) const { + output_[idx].real = 0; + output_[idx].imag = input_[idx]; + } + + const Real* input_; + T* output_; + int64_t numel_; +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index 71ef5a962f0989f2ed0c93bfe0c392376c0f084c..5afda787339dbe714ba6c82e3c34d39eb6d75580 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -54,6 +54,8 @@ template struct SetConstant; template struct SetConstant; template struct SetConstant; template struct SetConstant; +template struct SetConstant; +template struct SetConstant; #endif #define DEFINE_CPU_TRANS(RANK) \ diff --git a/paddle/fluid/operators/real_op.cc b/paddle/fluid/operators/real_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..5f667999ee613961c44195836bcd36b0530a5c36 --- /dev/null +++ b/paddle/fluid/operators/real_op.cc @@ -0,0 +1,105 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/real_op.h" + +namespace paddle { +namespace operators { + +class RealOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "Real"); + OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "Real"); + + auto x_dims = ctx->GetInputDim("X"); + ctx->SetOutputDim("Out", x_dims); + ctx->ShareLoD("X", "Out"); + } +}; + +class RealOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor), The input tensor of real op."); + AddOutput("Out", "(Tensor), The output tensor of real op."); + AddComment(R"DOC( +Real Operator. + +This operator is used to get a new tensor containing real values +from a tensor with complex data type. + +)DOC"); + } +}; + +class RealGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input", + "Out@Grad", "RealGrad"); + OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")), "Output", + "X@Grad", "RealGrad"); + + auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out")); + ctx->SetOutputDim(framework::GradVarName("X"), dout_dims); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + auto dtype = OperatorWithKernel::IndicateVarDataType( + ctx, framework::GradVarName("Out")); + auto complex_dtype = framework::ToComplexType(dtype); + return framework::OpKernelType(complex_dtype, ctx.GetPlace()); + } +}; + +template +class RealGradOpMaker : public framework::SingleGradOpMaker { + public: + using framework::SingleGradOpMaker::SingleGradOpMaker; + void Apply(GradOpPtr grad_op) const override { + grad_op->SetType("real_grad"); + grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); + grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); + } +}; + +DECLARE_INPLACE_OP_INFERER(RealOpInplaceInferer, {"X", "Out"}); +DECLARE_INPLACE_OP_INFERER(RealGradOpInplaceInferer, + {framework::GradVarName("Out"), + framework::GradVarName("X")}); + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(real, ops::RealOp, ops::RealOpMaker, + ops::RealGradOpMaker<::paddle::framework::OpDesc>, + ops::RealGradOpMaker<::paddle::imperative::OpBase>); +REGISTER_OPERATOR(real_grad, ops::RealGradOp); + +REGISTER_OP_CPU_KERNEL(real, ops::RealKernel, + ops::RealKernel); +REGISTER_OP_CPU_KERNEL(real_grad, + ops::RealGradKernel, + ops::RealGradKernel); diff --git a/paddle/fluid/operators/real_op.cu b/paddle/fluid/operators/real_op.cu new file mode 100644 index 0000000000000000000000000000000000000000..b3d0855111b72f3eba4d9e737b4b650042f7238a --- /dev/null +++ b/paddle/fluid/operators/real_op.cu @@ -0,0 +1,28 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/real_op.h" + +namespace ops = paddle::operators; + +REGISTER_OP_CUDA_KERNEL(real, + ops::RealKernel, + ops::RealKernel); +REGISTER_OP_CUDA_KERNEL(real_grad, + ops::RealGradKernel, + ops::RealGradKernel); diff --git a/paddle/fluid/operators/real_op.h b/paddle/fluid/operators/real_op.h new file mode 100644 index 0000000000000000000000000000000000000000..6cc9065269c62716b54c329d46711ff96f83f015 --- /dev/null +++ b/paddle/fluid/operators/real_op.h @@ -0,0 +1,66 @@ +/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/complex_functors.h" +#include "paddle/fluid/platform/for_range.h" + +namespace paddle { +namespace operators { + +template +class RealKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + const framework::Tensor* x = ctx.Input("X"); + framework::Tensor* out = ctx.Output("Out"); + + auto numel = x->numel(); + auto* x_data = x->data(); + auto* out_data = out->mutable_data>( + ctx.GetPlace(), static_cast(numel * sizeof(math::Real))); + + auto& dev_ctx = ctx.template device_context(); + platform::ForRange for_range(dev_ctx, numel); + math::RealFunctor functor(x_data, out_data, numel); + for_range(functor); + } +}; + +template +class RealGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const { + const framework::Tensor* d_out = + ctx.Input(framework::GradVarName("Out")); + framework::Tensor* d_x = + ctx.Output(framework::GradVarName("X")); + + auto numel = d_out->numel(); + auto* dout_data = d_out->data>(); + auto* dx_data = d_x->mutable_data( + ctx.GetPlace(), static_cast(numel * sizeof(T))); + + auto& dev_ctx = ctx.template device_context(); + platform::ForRange for_range(dev_ctx, numel); + math::RealToComplexFunctor functor(dout_data, dx_data, numel); + for_range(functor); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index 908e06b96e493c825537e81c09caf992bb2a4608..1d5f4cc1dff5265957987b5b38f7ee781d88ec2a 100755 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -51,6 +51,8 @@ from .tensor.random import bernoulli from .tensor.attribute import rank #DEFINE_ALIAS from .tensor.attribute import shape #DEFINE_ALIAS +from 
.tensor.attribute import real #DEFINE_ALIAS +from .tensor.attribute import imag #DEFINE_ALIAS from .tensor.creation import to_tensor #DEFINE_ALIAS from .tensor.creation import diag #DEFINE_ALIAS from .tensor.creation import eye #DEFINE_ALIAS @@ -196,6 +198,7 @@ from .tensor.math import isinf #DEFINE_ALIAS from .tensor.math import isnan #DEFINE_ALIAS from .tensor.math import prod #DEFINE_ALIAS from .tensor.math import broadcast_shape #DEFINE_ALIAS +from .tensor.math import conj #DEFINE_ALIAS from .tensor.random import multinomial #DEFINE_ALIAS from .tensor.random import standard_normal diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index bec82ee3c3a6848988c2ea5739fd6bd7a8574b45..bd38bae42e0a6e5aed8f689426c6aa03aaca0843 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -145,8 +145,11 @@ def get_numeric_gradient(place, return numpy_tensor[i] elif tensor_to_check_dtype == np.float32: return tensor._get_float_element(i) - else: + elif tensor_to_check_dtype == np.float64: return tensor._get_double_element(i) + else: + raise TypeError("Unsupported test data type %s." % + tensor_to_check_dtype) def __set_elem__(tensor, i, e): if tensor_to_check_dtype == np.float16: @@ -158,8 +161,11 @@ def get_numeric_gradient(place, tensor.set(numpy_tensor, place) elif tensor_to_check_dtype == np.float32: tensor._set_float_element(i, e) - else: + elif tensor_to_check_dtype == np.float64: tensor._set_double_element(i, e) + else: + raise TypeError("Unsupported test data type %s." % + tensor_to_check_dtype) # we only compute gradient of one element each time. # we use a for loop to compute the gradient of every element. @@ -1329,14 +1335,15 @@ class OpTest(unittest.TestCase): in_place=False, max_relative_error=0.005, user_defined_grads=None, + user_defined_grad_outputs=None, check_dygraph=True): self._check_grad_helper() places = self._get_places() for place in places: - self.check_grad_with_place(place, inputs_to_check, output_names, - no_grad_set, numeric_grad_delta, - in_place, max_relative_error, - user_defined_grads, check_dygraph) + self.check_grad_with_place( + place, inputs_to_check, output_names, no_grad_set, + numeric_grad_delta, in_place, max_relative_error, + user_defined_grads, user_defined_grad_outputs, check_dygraph) def check_grad_with_place(self, place, @@ -1347,6 +1354,7 @@ class OpTest(unittest.TestCase): in_place=False, max_relative_error=0.005, user_defined_grads=None, + user_defined_grad_outputs=None, check_dygraph=True): self.scope = core.Scope() op_inputs = self.inputs if hasattr(self, "inputs") else dict() @@ -1412,15 +1420,18 @@ class OpTest(unittest.TestCase): delta=numeric_grad_delta, in_place=in_place) for input_to_check in inputs_to_check ] + analytic_grads = self._get_gradient(inputs_to_check, place, - output_names, no_grad_set) + output_names, no_grad_set, + user_defined_grad_outputs) self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check, max_relative_error, "Gradient Check On %s" % str(place)) if check_dygraph: - dygraph_grad = self._get_dygraph_grad(inputs_to_check, place, - output_names, no_grad_set) + dygraph_grad = self._get_dygraph_grad( + inputs_to_check, place, output_names, user_defined_grad_outputs, + no_grad_set) self._assert_is_close(numeric_grads, dygraph_grad, inputs_to_check, max_relative_error, "Gradient Check On %s" % str(place)) @@ -1438,6 +1449,7 @@ class OpTest(unittest.TestCase): inputs_to_check, place, output_names, + 
user_defined_grad_outputs=None, no_grad_set=None): with fluid.dygraph.base.guard(place=place): block = fluid.default_main_program().global_block() @@ -1469,62 +1481,74 @@ class OpTest(unittest.TestCase): outputs_valid[output_name] = self._find_var_in_dygraph( outputs, output_name) - if len(outputs_valid) == 1: - loss = block.create_var( - dtype=self.dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=False, - shape=[1]) - for outputs_valid_key in outputs_valid: + if user_defined_grad_outputs is None: + if len(outputs_valid) == 1: + loss = block.create_var( + dtype=self.dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=False, + shape=[1]) + for outputs_valid_key in outputs_valid: + block.append_op( + type="mean", + inputs={"X": outputs_valid[outputs_valid_key]}, + outputs={"Out": [loss]}, + attrs=None) + else: + avg_sum = [] + for cur_loss in outputs_valid: + cur_avg_loss = block.create_var( + dtype=self.dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=False) + block.append_op( + type="mean", + inputs={"X": outputs_valid[cur_loss]}, + outputs={"Out": [cur_avg_loss]}, + attrs=None) + avg_sum.append(cur_avg_loss) + loss_sum = block.create_var( + dtype=self.dtype, + type=core.VarDesc.VarType.LOD_TENSOR, + persistable=False, + stop_gradient=False, + shape=[1]) block.append_op( - type="mean", - inputs={"X": outputs_valid[outputs_valid_key]}, - outputs={"Out": [loss]}, + type='sum', + inputs={"X": avg_sum}, + outputs={"Out": loss_sum}, attrs=None) - else: - avg_sum = [] - for cur_loss in outputs_valid: - cur_avg_loss = block.create_var( + loss = block.create_var( dtype=self.dtype, type=core.VarDesc.VarType.LOD_TENSOR, persistable=False, - stop_gradient=False) + stop_gradient=False, + shape=[1]) block.append_op( - type="mean", - inputs={"X": outputs_valid[cur_loss]}, - outputs={"Out": [cur_avg_loss]}, - attrs=None) - avg_sum.append(cur_avg_loss) - loss_sum = block.create_var( - dtype=self.dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=False, - shape=[1]) - block.append_op( - type='sum', - inputs={"X": avg_sum}, - outputs={"Out": loss_sum}, - attrs=None) - loss = block.create_var( - dtype=self.dtype, - type=core.VarDesc.VarType.LOD_TENSOR, - persistable=False, - stop_gradient=False, - shape=[1]) - block.append_op( - type='scale', - inputs={"X": loss_sum}, - outputs={"Out": loss}, - attrs={'scale': 1.0 / float(len(avg_sum))}) - loss.backward() - - fetch_list_grad = [] - for inputs_to_check_name in inputs_to_check: - a = inputs_grad_dict[inputs_to_check_name].gradient() - fetch_list_grad.append(a) - return fetch_list_grad + type='scale', + inputs={"X": loss_sum}, + outputs={"Out": loss}, + attrs={'scale': 1.0 / float(len(avg_sum))}) + loss.backward() + fetch_list_grad = [] + for inputs_to_check_name in inputs_to_check: + a = inputs_grad_dict[inputs_to_check_name].gradient() + fetch_list_grad.append(a) + return fetch_list_grad + else: + # user_defined_grad_outputs here are numpy arrays + if not isinstance(user_defined_grad_outputs, list): + user_defined_grad_outputs = [user_defined_grad_outputs] + grad_outputs = [] + for grad_out_value in user_defined_grad_outputs: + grad_outputs.append(paddle.to_tensor(grad_out_value)) + grad_inputs = paddle.grad( + outputs=fluid.layers.utils.flatten(outputs), + inputs=fluid.layers.utils.flatten(inputs), + grad_outputs=grad_outputs) + return [grad.numpy() for grad in grad_inputs] @staticmethod def _numpy_to_lod_tensor(np_value, 
lod, place): @@ -1551,18 +1575,48 @@ class OpTest(unittest.TestCase): place, output_names, no_grad_set, + user_defined_grad_outputs=None, parallel=False): prog = Program() + scope = core.Scope() block = prog.global_block() self._append_ops(block) - loss = append_loss_ops(block, output_names) - param_grad_list = append_backward( - loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set) inputs = self._get_inputs(block) + outputs = self._get_outputs(block) feed_dict = self.feed_var(inputs, place) - fetch_list = [g for p, g in param_grad_list] + if user_defined_grad_outputs is None: + loss = append_loss_ops(block, output_names) + param_grad_list = append_backward( + loss=loss, + parameter_list=input_to_check, + no_grad_set=no_grad_set) + fetch_list = [g for p, g in param_grad_list] + else: + assert parallel is False, "unsupported parallel mode when giving custom grad outputs." + # user_defined_grad_outputs here are numpy arrays + if not isinstance(user_defined_grad_outputs, list): + user_defined_grad_outputs = [user_defined_grad_outputs] + grad_outputs = [] + for grad_out_value in user_defined_grad_outputs: + # `presistable` is used to avoid executor create new var in local scope + var = block.create_var( + shape=grad_out_value.shape, + dtype=grad_out_value.dtype, + persistable=True) + true_var = scope.var(var.name) + tensor = true_var.get_tensor() + tensor.set(grad_out_value, place) + grad_outputs.append(var) + targets = [ + outputs[name] for name in outputs if name in output_names + ] + inputs = [inputs[name] for name in inputs if name in input_to_check] + grad_inputs = paddle.static.gradients(targets, inputs, grad_outputs, + no_grad_set) + fetch_list = grad_inputs + if parallel: use_cuda = False if isinstance(place, fluid.CUDAPlace): @@ -1573,4 +1627,8 @@ class OpTest(unittest.TestCase): executor = fluid.Executor(place) return list( map(np.array, - executor.run(prog, feed_dict, fetch_list, return_numpy=False))) + executor.run(prog, + feed_dict, + fetch_list, + scope=scope, + return_numpy=False))) diff --git a/python/paddle/fluid/tests/unittests/test_conj_op.py b/python/paddle/fluid/tests/unittests/test_conj_op.py new file mode 100644 index 0000000000000000000000000000000000000000..774a29ada4a84690ec71ae08086a54be092fc86e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_conj_op.py @@ -0,0 +1,126 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
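+# Note: this test relies on the `user_defined_grad_outputs` argument added to
+# OpTest.check_grad in this patch: a user-supplied gradient of Out is fed to the backward
+# pass, and the expected gradient of X is np.conj(grad_out), matching ConjGradMaker,
+# which reuses the conj op itself as its gradient op.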
+ +from __future__ import print_function + +import unittest +import numpy as np +import paddle +import paddle.fluid.core as core +import sys +sys.path.append("..") +from op_test import OpTest +from paddle.fluid import Program, program_guard +import paddle.fluid.dygraph as dg +import paddle.static as static +from numpy.random import random as rand + +paddle.enable_static() + + +class TestConjOp(OpTest): + def setUp(self): + self.op_type = "conj" + self.init_dtype_type() + self.init_input_output() + self.init_grad_input_output() + + def init_dtype_type(self): + self.dtype = np.complex64 + + def init_input_output(self): + x = (np.random.random((12, 14)) + 1j * np.random.random( + (12, 14))).astype(self.dtype) + out = np.conj(x) + + self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} + self.outputs = {'Out': out} + + def init_grad_input_output(self): + self.grad_out = (np.ones((12, 14)) + 1j * np.ones( + (12, 14))).astype(self.dtype) + self.grad_in = np.conj(self.grad_out) + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_in], + user_defined_grad_outputs=[self.grad_out]) + + +class TestComplexConjOp(unittest.TestCase): + def setUp(self): + self._dtypes = ["float32", "float64"] + self._places = [paddle.CPUPlace()] + if paddle.is_compiled_with_cuda(): + self._places.append(paddle.CUDAPlace(0)) + + def test_conj_api(self): + for dtype in self._dtypes: + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3]).astype(dtype) + for place in self._places: + with dg.guard(place): + var_x = paddle.to_tensor(input) + result = paddle.conj(var_x).numpy() + target = np.conj(input) + self.assertTrue(np.array_equal(result, target)) + + def test_conj_operator(self): + for dtype in self._dtypes: + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3]).astype(dtype) + for place in self._places: + with dg.guard(place): + var_x = paddle.to_tensor(input) + result = var_x.conj().numpy() + target = np.conj(input) + self.assertTrue(np.array_equal(result, target)) + + def test_conj_static_mode(self): + def init_input_output(dtype): + input = rand([2, 20, 2, 3]).astype(dtype) + 1j * rand( + [2, 20, 2, 3]).astype(dtype) + return {'x': input}, np.conj(input) + + for dtype in self._dtypes: + input_dict, np_res = init_input_output(dtype) + for place in self._places: + with static.program_guard(static.Program()): + x_dtype = np.complex64 if dtype == "float32" else np.complex128 + x = static.data( + name="x", shape=[2, 20, 2, 3], dtype=x_dtype) + out = paddle.conj(x) + + exe = static.Executor(place) + out_value = exe.run(feed=input_dict, fetch_list=[out.name]) + self.assertTrue(np.array_equal(np_res, out_value[0])) + + def test_conj_api_real_number(self): + for dtype in self._dtypes: + input = rand([2, 20, 2, 3]).astype(dtype) + for place in self._places: + with dg.guard(place): + var_x = paddle.to_tensor(input) + result = paddle.conj(var_x).numpy() + target = np.conj(input) + self.assertTrue(np.array_equal(result, target)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index c941d7c5f34352ac0e762403d0e7e3f0238cbe36..717ffb765360b091d8763d1d75b2b1e34d375335 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -428,5 +428,64 @@ class 
TestAddOp(unittest.TestCase): self.assertEqual((np_z == z_expected).all(), True) +class TestComplexElementwiseAddOp(OpTest): + def setUp(self): + self.op_type = "elementwise_add" + self.init_base_dtype() + self.init_input_output() + self.init_grad_input_output() + + self.inputs = { + 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + } + self.attrs = {'axis': -1, 'use_mkldnn': False} + self.outputs = {'Out': self.out} + + def init_base_dtype(self): + self.dtype = np.float64 + + def init_input_output(self): + self.x = np.random.random( + (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( + (2, 3, 4, 5)).astype(self.dtype) + self.y = np.random.random( + (2, 3, 4, 5)).astype(self.dtype) + 1J * np.random.random( + (2, 3, 4, 5)).astype(self.dtype) + self.out = self.x + self.y + + def init_grad_input_output(self): + self.grad_out = np.ones((2, 3, 4, 5), self.dtype) + 1J * np.ones( + (2, 3, 4, 5), self.dtype) + self.grad_x = self.grad_out + self.grad_y = self.grad_out + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad( + ['X', 'Y'], + 'Out', + user_defined_grads=[self.grad_x, self.grad_y], + user_defined_grad_outputs=[self.grad_out]) + + def test_check_grad_ingore_x(self): + self.check_grad( + ['Y'], + 'Out', + no_grad_set=set("X"), + user_defined_grads=[self.grad_y], + user_defined_grad_outputs=[self.grad_out]) + + def test_check_grad_ingore_y(self): + self.check_grad( + ['X'], + 'Out', + no_grad_set=set('Y'), + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out]) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_real_imag_op.py b/python/paddle/fluid/tests/unittests/test_real_imag_op.py new file mode 100644 index 0000000000000000000000000000000000000000..ab24506f80101f3777a88cf5cbd8a113cba3587d --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_real_imag_op.py @@ -0,0 +1,167 @@ +# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
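+# Note: the backward checks below also use `user_defined_grad_outputs`. For `real`, the
+# expected input gradient is grad_out promoted to a complex tensor with zero imaginary part;
+# for `imag`, it is the purely imaginary tensor 1j * grad_out, matching
+# RealToComplexFunctor and ImagToComplexFunctor in complex_functors.h.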
+ +from __future__ import print_function + +import unittest +import numpy as np + +import paddle +import paddle.fluid as fluid +import paddle.static as static +from op_test import OpTest + +numpy_apis = { + "real": np.real, + "imag": np.imag, +} + +paddle_apis = { + "real": paddle.real, + "imag": paddle.imag, +} + + +class TestRealOp(OpTest): + def setUp(self): + # switch to static + paddle.enable_static() + # op test attrs + self.op_type = "real" + self.dtype = np.float64 + self.init_input_output() + # backward attrs + self.init_grad_input_output() + + def init_input_output(self): + self.inputs = { + 'X': np.random.random( + (20, 5)).astype(self.dtype) + 1j * np.random.random( + (20, 5)).astype(self.dtype) + } + self.outputs = {'Out': numpy_apis[self.op_type](self.inputs['X'])} + + def init_grad_input_output(self): + self.grad_out = np.ones((20, 5), self.dtype) + self.grad_x = np.real(self.grad_out) + 1j * np.zeros( + self.grad_out.shape) + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad( + ['X'], + 'Out', + user_defined_grads=[self.grad_x], + user_defined_grad_outputs=[self.grad_out]) + + +class TestImagOp(TestRealOp): + def setUp(self): + # switch to static + paddle.enable_static() + # op test attrs + self.op_type = "imag" + self.dtype = np.float64 + self.init_input_output() + # backward attrs + self.init_grad_input_output() + + def init_grad_input_output(self): + self.grad_out = np.ones((20, 5), self.dtype) + self.grad_x = np.zeros(self.grad_out.shape) + 1j * np.real( + self.grad_out) + + +class TestRealAPI(unittest.TestCase): + def setUp(self): + # switch to static + paddle.enable_static() + # prepare test attrs + self.api = "real" + self.dtypes = ["complex64", "complex128"] + self.places = [paddle.CPUPlace()] + if paddle.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + self._shape = [2, 20, 2, 3] + + def test_in_static_mode(self): + def init_input_output(dtype): + input = np.random.random(self._shape).astype( + dtype) + 1j * np.random.random(self._shape).astype(dtype) + return {'x': input}, numpy_apis[self.api](input) + + for dtype in self.dtypes: + input_dict, np_res = init_input_output(dtype) + for place in self.places: + with static.program_guard(static.Program()): + x = static.data(name="x", shape=self._shape, dtype=dtype) + out = paddle_apis[self.api](x) + + exe = static.Executor(place) + out_value = exe.run(feed=input_dict, fetch_list=[out.name]) + self.assertTrue(np.array_equal(np_res, out_value[0])) + + def test_in_dynamic_mode(self): + for dtype in self.dtypes: + input = np.random.random(self._shape).astype( + dtype) + 1j * np.random.random(self._shape).astype(dtype) + np_res = numpy_apis[self.api](input) + for place in self.places: + # it is more convenient to use `guard` than `enable/disable_**` here + with fluid.dygraph.guard(place): + input_t = paddle.to_tensor(input) + res = paddle_apis[self.api](input_t).numpy() + self.assertTrue(np.array_equal(np_res, res)) + res_t = input_t.real().numpy( + ) if self.api is "real" else input_t.imag().numpy() + self.assertTrue(np.array_equal(np_res, res_t)) + + def test_name_argument(self): + with static.program_guard(static.Program()): + x = static.data(name="x", shape=self._shape, dtype=self.dtypes[0]) + out = paddle_apis[self.api](x, name="real_res") + self.assertTrue("real_res" in out.name) + + def test_dtype_error(self): + # in static mode + with self.assertRaises(TypeError): + with static.program_guard(static.Program()): + x = static.data(name="x", 
shape=self._shape, dtype="float32") + out = paddle_apis[self.api](x, name="real_res") + + # in dynamic mode + with self.assertRaises(RuntimeError): + with fluid.dygraph.guard(): + input = np.random.random(self._shape).astype("float32") + input_t = paddle.to_tensor(input) + res = paddle_apis[self.api](input_t) + + +class TestImagAPI(TestRealAPI): + def setUp(self): + # switch to static + paddle.enable_static() + # prepare test attrs + self.api = "imag" + self.dtypes = ["complex64", "complex128"] + self.places = [paddle.CPUPlace()] + if paddle.is_compiled_with_cuda(): + self.places.append(paddle.CUDAPlace(0)) + self._shape = [2, 20, 2, 3] + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py index 515b4024471209cb84fdf354d9167ee07aa259f6..ecde93834508e01e610ee4910d4893ca4e2e89a8 100755 --- a/python/paddle/tensor/__init__.py +++ b/python/paddle/tensor/__init__.py @@ -22,6 +22,8 @@ from __future__ import print_function from .random import randperm from .attribute import rank #DEFINE_ALIAS from .attribute import shape #DEFINE_ALIAS +from .attribute import real #DEFINE_ALIAS +from .attribute import imag #DEFINE_ALIAS from .creation import to_tensor #DEFINE_ALIAS from .creation import diag #DEFINE_ALIAS from .creation import eye #DEFINE_ALIAS @@ -167,6 +169,7 @@ from .math import prod #DEFINE_ALIAS from .math import all #DEFINE_ALIAS from .math import any #DEFINE_ALIAS from .math import broadcast_shape #DEFINE_ALIAS +from .math import conj #DEFINE_ALIAS from .random import multinomial #DEFINE_ALIAS from .random import standard_normal diff --git a/python/paddle/tensor/attribute.py b/python/paddle/tensor/attribute.py index 255557673c1df30bfdd3ebc62ec61b36dc1ede89..499586b083fc4d5966f7d40a11c155354509136b 100644 --- a/python/paddle/tensor/attribute.py +++ b/python/paddle/tensor/attribute.py @@ -12,8 +12,111 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import print_function + +from ..fluid.framework import core, in_dygraph_mode, Variable +from ..fluid.layer_helper import LayerHelper +from ..fluid.data_feeder import check_variable_and_dtype + # TODO: define functions to get tensor attributes from ..fluid.layers import rank #DEFINE_ALIAS from ..fluid.layers import shape #DEFINE_ALIAS -__all__ = ['rank', 'shape'] +__all__ = ['rank', 'shape', 'real', 'imag'] + + +def _complex_to_real_dtype(dtype): + if dtype == core.VarDesc.VarType.COMPLEX64: + return core.VarDesc.VarType.FP32 + elif dtype == core.VarDesc.VarType.COMPLEX128: + return core.VarDesc.VarType.FP64 + else: + return dtype + + +def real(x, name=None): + """ + Returns a new tensor containing real values of the input tensor. + + Args: + x (Tensor): the input tensor, its data type could be complex64 or complex128. + name (str, optional): The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name` . + + Returns: + Tensor: a tensor containing real values of the input tensor. + + Examples: + .. 
code-block:: python + + import paddle + + x = paddle.to_tensor( + [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]]) + # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, + # [[(1+6j), (2+5j), (3+4j)], + # [(4+3j), (5+2j), (6+1j)]]) + + real_res = paddle.real(x) + # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, + # [[1., 2., 3.], + # [4., 5., 6.]]) + + real_t = x.real() + # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, + # [[1., 2., 3.], + # [4., 5., 6.]]) + """ + if in_dygraph_mode(): + return core.ops.real(x) + + check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real') + helper = LayerHelper('real', **locals()) + out = helper.create_variable_for_type_inference( + dtype=_complex_to_real_dtype(helper.input_dtype())) + helper.append_op(type='real', inputs={'X': x}, outputs={'Out': out}) + return out + + +def imag(x, name=None): + """ + Returns a new tensor containing imaginary values of input tensor. + + Args: + x (Tensor): the input tensor, its data type could be complex64 or complex128. + name (str, optional): The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name` . + + Returns: + Tensor: a tensor containing imaginary values of the input tensor. + + Examples: + .. code-block:: python + + import paddle + + x = paddle.to_tensor( + [[1 + 6j, 2 + 5j, 3 + 4j], [4 + 3j, 5 + 2j, 6 + 1j]]) + # Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, + # [[(1+6j), (2+5j), (3+4j)], + # [(4+3j), (5+2j), (6+1j)]]) + + imag_res = paddle.imag(x) + # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, + # [[6., 5., 4.], + # [3., 2., 1.]]) + + imag_t = x.imag() + # Tensor(shape=[2, 3], dtype=float32, place=CUDAPlace(0), stop_gradient=True, + # [[6., 5., 4.], + # [3., 2., 1.]]) + """ + if in_dygraph_mode(): + return core.ops.imag(x) + + check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag') + helper = LayerHelper('imag', **locals()) + out = helper.create_variable_for_type_inference( + dtype=_complex_to_real_dtype(helper.input_dtype())) + helper.append_op(type='imag', inputs={'X': x}, outputs={'Out': out}) + return out diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index 80d2a4a513398ed1630014a0327efbc1d0010fe9..f14cbebb57d30d8c0f923fe728a62e98aa6b067b 100755 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -124,7 +124,8 @@ __all__ = [ 'isfinite', 'isinf', 'isnan', - 'broadcast_shape' + 'broadcast_shape', + 'conj' ] # yapf: enable. @@ -2213,3 +2214,44 @@ def broadcast_shape(x_shape, y_shape): """ return core.broadcast_shape(x_shape, y_shape) + +def conj(x, name=None): + r""" + This function computes the conjugate of the Tensor elementwisely. + + Args: + x (Tensor): The input tensor which hold the complex numbers. + Optional data types are: complex64, complex128, float32, float64, int32 or int64. + name (str, optional): The default value is None. Normally there is no need for + user to set this property. For more information, please refer to :ref:`api_guide_Name` + + Returns: + out (Tensor): The conjugate of input. The shape and data type is the same with input. + If the elements of tensor is real type such as float32, float64, int32 or int64, the out is the same with input. + + Examples: + .. 
code-block:: python + + import paddle + data=paddle.to_tensor([[1+1j, 2+2j, 3+3j], [4+4j, 5+5j, 6+6j]]) + #Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, + # [[(1+1j), (2+2j), (3+3j)], + # [(4+4j), (5+5j), (6+6j)]]) + + conj_data=paddle.conj(data) + #Tensor(shape=[2, 3], dtype=complex64, place=CUDAPlace(0), stop_gradient=True, + # [[(1-1j), (2-2j), (3-3j)], + # [(4-4j), (5-5j), (6-6j)]]) + + """ + if in_dygraph_mode(): + return core.ops.conj(x) + + check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj') + + helper = LayerHelper('conj', **locals()) + out = helper.create_variable_for_type_inference( + dtype=helper.input_dtype()) + + helper.append_op(type='conj', inputs={'X': x}, outputs={'Out': [out]}) + return out
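As a quick end-to-end check of the Python surface added by this patch, the sketch below (illustrative only; it assumes a Paddle build that already contains these kernels and the tensor-method bindings exercised by the unit tests above) compares paddle.real, paddle.imag and paddle.conj against NumPy in dynamic graph mode:

    import numpy as np
    import paddle

    x_np = np.array([[1 + 6j, 2 + 5j], [3 + 4j, 4 + 3j]]).astype(np.complex64)
    x = paddle.to_tensor(x_np)

    # real/imag return float32 tensors for a complex64 input
    assert np.array_equal(paddle.real(x).numpy(), np.real(x_np))
    assert np.array_equal(paddle.imag(x).numpy(), np.imag(x_np))

    # conj keeps the complex dtype and negates the imaginary part
    assert np.array_equal(paddle.conj(x).numpy(), np.conj(x_np))

    # the same results are available as tensor methods
    assert np.array_equal(x.real().numpy(), np.real(x_np))
    assert np.array_equal(x.imag().numpy(), np.imag(x_np))
    assert np.array_equal(x.conj().numpy(), np.conj(x_np))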