diff --git a/paddle/fluid/operators/meshgrid_op.cc b/paddle/fluid/operators/meshgrid_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..12b255329da2d5875dbdf338ed83d3682a0bd8b2
--- /dev/null
+++ b/paddle/fluid/operators/meshgrid_op.cc
@@ -0,0 +1,157 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/meshgrid_op.h"
+
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace paddle {
+namespace operators {
+
+using framework::Tensor;
+
+class MeshgridOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE_GE(
+        ctx->Inputs("X").size(), 1UL,
+        platform::errors::InvalidArgument("Input(X) should not be empty."));
+    PADDLE_ENFORCE_GE(
+        ctx->Outputs("Out").size(), 1UL,
+        platform::errors::InvalidArgument("Output(Out) should not be empty."));
+
+    auto inputs_dims = ctx->GetInputsDim("X");
+    const size_t inputs_num = inputs_dims.size();
+    auto outs_names = ctx->Outputs("Out");
+    const size_t outputs_num = outs_names.size();
+
+    auto out_shape = std::vector<int>(inputs_num);
+
+    for (size_t i = 0; i < inputs_num; i++) {
+      out_shape[i] = inputs_dims[i][0];
+    }
+    auto out_dims = framework::make_ddim(std::vector<int>(out_shape));
+    std::vector<framework::DDim> outs_dims(outputs_num, out_dims);
+    ctx->SetOutputsDim("Out", outs_dims);
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    auto inputs = ctx.MultiInput<Tensor>("X");
+    auto input_data_type = framework::proto::VarType::Type(0);
+    bool flag = 0;
+    for (auto* input : inputs) {
+      if (input->IsInitialized() && input->numel() > 0) {
+        input_data_type = input->type();
+        flag = 1;
+        break;
+      }
+    }
+    if (flag == 0) {
+      PADDLE_THROW(platform::errors::InvalidArgument(
+          "All Inputs of Meshgrid OP are Empty!"));
+    }
+
+    return framework::OpKernelType(input_data_type, ctx.GetPlace());
+  }
+};
+
+class MeshgridOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "(Tensor, default Tensor<float>).").AsDuplicable();
+    AddOutput("Out", "(Tensor, default Tensor<float>).").AsDuplicable();
+
+    AddComment(R"DOC(
+Meshgrid Operator.
+Take N tensors, each of which can be either a scalar or a 1-dimensional vector,
+and create N-dimensional grids.
+
+Args:
+  tensors (list of tensor): if the k input tensors have shapes (N1,), (N2,), ..., (Nk,),
+  then the output tensors are all of size (N1, N2, ..., Nk).
+
+Example::
+>>> x = fluid.data(name='x', shape=[10], dtype='float64')
+>>> y = fluid.data(name='y', shape=[20], dtype='float64')
+>>> grid_x, grid_y = fluid.layers.meshgrid([x, y])
+>>> grid_x.shape
+(10, 20)
+>>> grid_y.shape
+(10, 20)
+)DOC");
+  }
+};
+
+class MeshgridGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE_GT(ctx->Inputs(framework::GradVarName("Out")).size(), 1,
+                      platform::errors::InvalidArgument(
+                          "Number of Inputs(Out@Grad) must be larger than 1"));
+    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
+                                       ctx, framework::GradVarName("Out")),
+                                   ctx.device_context());
+  }
+};
+
+template <typename T>
+class MeshgridGradOpMaker : public framework::SingleGradOpMaker<T> {
+ public:
+  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
+
+ protected:
+  void Apply(GradOpPtr<T> op) const override {
+    op->SetType("meshgrid_grad");
+    op->SetInput("X", this->Input("X"));
+    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
+    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X", false));
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(meshgrid, ops::MeshgridOp, ops::MeshgridOpMaker,
+                  ops::MeshgridGradOpMaker<paddle::framework::OpDesc>,
+                  ops::MeshgridGradOpMaker<paddle::imperative::OpBase>);
+REGISTER_OPERATOR(meshgrid_grad, ops::MeshgridGradOp);
+REGISTER_OP_CPU_KERNEL(
+    meshgrid, ops::MeshgridKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MeshgridKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::MeshgridKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::MeshgridKernel<paddle::platform::CPUDeviceContext, int64_t>);
+
+REGISTER_OP_CPU_KERNEL(
+    meshgrid_grad,
+    ops::MeshgridGradKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::MeshgridGradKernel<paddle::platform::CPUDeviceContext, double>,
+    ops::MeshgridGradKernel<paddle::platform::CPUDeviceContext, int>,
+    ops::MeshgridGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
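[Note: a minimal NumPy sketch of the forward semantics registered above, for reference only — each 1-D input is viewed with its length on its own axis (ones elsewhere) and then broadcast to the common grid shape, mirroring what MeshgridKernel does with Eigen. The helper name meshgrid_forward is illustrative, not part of the patch.]

    import numpy as np

    def meshgrid_forward(inputs):
        # inputs: list of k 1-D arrays with shapes (N1,), ..., (Nk,)
        shape = [x.shape[0] for x in inputs]
        outs = []
        for i, x in enumerate(inputs):
            view = [1] * len(inputs)
            view[i] = shape[i]  # view shape (1, ..., Ni, ..., 1)
            outs.append(np.broadcast_to(x.reshape(view), shape).copy())
        return outs

    grid_x, grid_y = meshgrid_forward([np.arange(10.0), np.arange(20.0)])
    assert grid_x.shape == (10, 20) and grid_y.shape == (10, 20)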
diff --git a/paddle/fluid/operators/meshgrid_op.cu b/paddle/fluid/operators/meshgrid_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..dc813a07f8c8c17e6c9b967a4fad372513d61594
--- /dev/null
+++ b/paddle/fluid/operators/meshgrid_op.cu
@@ -0,0 +1,29 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/meshgrid_op.h"
+
+namespace ops = paddle::operators;
+REGISTER_OP_CUDA_KERNEL(
+    meshgrid, ops::MeshgridKernel<paddle::platform::CUDADeviceContext, float>,
+    ops::MeshgridKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::MeshgridKernel<paddle::platform::CUDADeviceContext, int>,
+    ops::MeshgridKernel<paddle::platform::CUDADeviceContext, int64_t>,
+    ops::MeshgridKernel<paddle::platform::CUDADeviceContext,
+                        paddle::platform::float16>);
+REGISTER_OP_CUDA_KERNEL(
+    meshgrid_grad,
+    ops::MeshgridGradKernel<paddle::platform::CUDADeviceContext, float>,
+    ops::MeshgridGradKernel<paddle::platform::CUDADeviceContext, double>,
+    ops::MeshgridGradKernel<paddle::platform::CUDADeviceContext, int>,
+    ops::MeshgridGradKernel<paddle::platform::CUDADeviceContext, int64_t>);
diff --git a/paddle/fluid/operators/meshgrid_op.h b/paddle/fluid/operators/meshgrid_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..d591912bef800afac50a3e9753dab7f402655b30
--- /dev/null
+++ b/paddle/fluid/operators/meshgrid_op.h
@@ -0,0 +1,198 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <vector>
+
+#include <boost/preprocessor/arithmetic/mod.hpp>
+#include <boost/preprocessor/comparison/greater.hpp>
+#include <boost/preprocessor/comparison/greater_equal.hpp>
+#include <boost/preprocessor/control/if.hpp>
+#include <boost/preprocessor/repetition/repeat.hpp>
+
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/errors.h"
+
+#define MAX_RANK_SUPPORTED 6
+
+#define MESHGRID_TEMPLATE(z, n, data) \
+  case n + 1: {                       \
+    MeshgridForward<n + 1>(context);  \
+    break;                            \
+  }
+#define REP_MESHGRID_TEMPLATE(n) BOOST_PP_REPEAT(n, MESHGRID_TEMPLATE, ~)
+#define COND(n) BOOST_PP_GREATER_EQUAL(n, BOOST_PP_MOD(n, MAX_RANK_SUPPORTED))
+
+#define MESHGRID_GRAD_CASE(n)    \
+  case n: {                      \
+    MeshgridBackward<n>(context); \
+    break;                       \
+  }
+#define MESHGRID_GRAD_TEMPLATE(z, n, data) \
+  BOOST_PP_IF(COND(n), MESHGRID_GRAD_CASE(n), )
+#define REP_MESHGRID_GRAD_TEMPLATE(n) \
+  BOOST_PP_REPEAT(n, MESHGRID_GRAD_TEMPLATE, ~)
+
+namespace paddle {
+namespace operators {
+
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenMatrix = framework::EigenMatrix<T, MajorType, IndexType>;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+template <typename T, size_t D, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
+
+template <typename DeviceContext, typename T>
+class MeshgridKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto ins = context.MultiInput<framework::Tensor>("X");
+    auto rank = ins.size();
+    switch (rank) {
+      REP_MESHGRID_TEMPLATE(MAX_RANK_SUPPORTED)
+      default:
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "Only support tensor nums between 1 and 6."));
+    }
+  }
+
+ protected:
+  template <int Rank>
+  void MeshgridForward(const framework::ExecutionContext& context) const {
+    auto ins = context.MultiInput<framework::Tensor>("X");
+    auto outs = context.MultiOutput<framework::Tensor>("Out");
+    PADDLE_ENFORCE_EQ(
+        ins.size() > 1, true,
+        platform::errors::InvalidArgument("expect at least 2 input tensors"));
+
+    int64_t size = ins.size();
+    std::vector<int64_t> shape(size);
+
+    for (int64_t i = 0; i < size; i++) {
+      switch (ins[i]->dims().size()) {
+        case 0:
+          shape[i] = 1;
+          break;
+        case 1:
+          shape[i] = ins[i]->dims()[0];
+          break;
+        default:
+          PADDLE_THROW(platform::errors::InvalidArgument(
+              "Expected scalar or 1D tensor in the tensor list but got tensor "
+              "%d: ",
+              i));
+      }
+    }
+
+    for (int64_t i = 0; i < size; i++) {
+      std::vector<int64_t> view_shape(size, 1);
+      view_shape[i] = shape[i];
+
+      framework::Tensor reshape_ins_tensor;
+      TensorCopy(*ins[i], context.GetPlace(), context.device_context(),
+                 &reshape_ins_tensor);
+      framework::DDim out_dims_reshape = framework::make_ddim(view_shape);
+      reshape_ins_tensor.Resize(out_dims_reshape);
+      framework::DDim out_dims = framework::make_ddim(shape);
+
+      Eigen::DSizes<int64_t, Rank> bcast_dims;
+      for (int64_t j = 0; j < size; j++) {
+        bcast_dims[j] = shape[j];
+      }
+      bcast_dims[i] = 1;
+
+      outs[i]->Resize(out_dims);
+      auto x = EigenTensor<T, Rank>::From(reshape_ins_tensor);
+      outs[i]->mutable_data<T>(context.GetPlace());
+      auto y = EigenTensor<T, Rank>::From(*outs[i]);
+      auto& place =
+          *context.template device_context<DeviceContext>().eigen_device();
+      y.device(place) = x.broadcast(bcast_dims);
+    }
+  }
+};
+
+template <typename DeviceContext, typename T>
+class MeshgridGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto out_grad =
+        context.MultiInput<framework::Tensor>(framework::GradVarName("Out"));
+    int n = out_grad.size();
+    switch (n) {
+      REP_MESHGRID_GRAD_TEMPLATE(MAX_RANK_SUPPORTED)
+      default:
+        PADDLE_THROW(platform::errors::InvalidArgument(
+            "only support tensor nums being between 1 and 6."));
+    }
+  }
+
+ protected:
+  template <int Rank>
+  void MeshgridBackward(const framework::ExecutionContext& context) const {
+    auto out_grad =
+        context.MultiInput<framework::Tensor>(framework::GradVarName("Out"));
+    auto ins = context.MultiInput<framework::Tensor>("X");
+    auto outs =
+        context.MultiOutput<framework::Tensor>(framework::GradVarName("X"));
+
+    int n = out_grad.size();
+    auto out_dims = out_grad[0]->dims();
+
+    for (int i = 0; i < n; i++) {
+      outs[i]->mutable_data<T>(context.GetPlace());
+      auto out_grad_tmp = EigenVector<T>::Flatten(*out_grad[i]);
+      auto in_grad = EigenVector<T>::Flatten(*outs[i]);
+
+      std::vector<int> reduce_dims_vec;
+      std::vector<int> reshape_dims_vec;
+      for (int j = 0; j < n; j++) {
+        reduce_dims_vec.push_back(reshape_dims_vec.size());
+        if (j == i) {
+          reshape_dims_vec.push_back(1);
+          reshape_dims_vec.push_back(out_dims[j]);
+        } else {
+          reshape_dims_vec.push_back(out_dims[j]);
+          reshape_dims_vec.push_back(1);
+        }
+      }
+
+      Eigen::DSizes<int, Rank> reduce_dims;
+      for (int k = 0; k < n; k++) {
+        reduce_dims[k] = reduce_dims_vec[k];
+      }
+
+      Eigen::DSizes<int, Rank * 2> reshape_dims;
+      for (int k = 0; k < n * 2; k++) {
+        reshape_dims[k] = reshape_dims_vec[k];
+      }
+
+      auto tensor_reduce_tmp =
+          out_grad_tmp.reshape(reshape_dims).sum(reduce_dims);
+      auto& place =
+          *context.template device_context<DeviceContext>().eigen_device();
+      in_grad.device(place) = tensor_reduce_tmp.reshape(in_grad.dimensions());
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
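[Note: the reshape-then-sum in MeshgridGradKernel above reduces the output gradient over every broadcast axis; a NumPy sketch of that reduction follows (meshgrid_backward is an illustrative name, not part of the patch).]

    import numpy as np

    def meshgrid_backward(out_grads):
        # out_grads: list of k arrays, each of shape (N1, ..., Nk);
        # the gradient w.r.t. input i sums over all axes except axis i.
        grads = []
        for i, g in enumerate(out_grads):
            axes = tuple(j for j in range(g.ndim) if j != i)
            grads.append(g.sum(axis=axes))
        return grads

    gx, gy = meshgrid_backward([np.ones((10, 20)), np.ones((10, 20))])
    assert gx.shape == (10,) and gy.shape == (20,)
    assert np.all(gx == 20) and np.all(gy == 10)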
diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 58b58005d8630805e09df844de1e6b6844e15225..9eac1a93f87a9749ca8c90571f70ab6933082d0a 100644
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -59,7 +59,7 @@ from .tensor.creation import full  #DEFINE_ALIAS
 from .tensor.creation import full_like  #DEFINE_ALIAS
 # from .tensor.creation import triu  #DEFINE_ALIAS
 # from .tensor.creation import tril  #DEFINE_ALIAS
-# from .tensor.creation import meshgrid  #DEFINE_ALIAS
+from .tensor.creation import meshgrid  #DEFINE_ALIAS
 # from .tensor.stat import mean  #DEFINE_ALIAS
 # from .tensor.stat import reduce_mean  #DEFINE_ALIAS
 # from .tensor.stat import std  #DEFINE_ALIAS
diff --git a/python/paddle/fluid/tests/unittests/test_meshgrid_op.py b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..eea1ca3282c93a40cc9fcf3149329a358cadcf41
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_meshgrid_op.py
@@ -0,0 +1,118 @@
+#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+from op_test import OpTest, skip_check_grad_ci
+import paddle.fluid as fluid
+import paddle
+from paddle.fluid import compiler, Program, program_guard, core
+
+
+class TestMeshgridOp(OpTest):
+    def setUp(self):
+        self.op_type = "meshgrid"
+        self.dtype = self.get_dtype()
+        ins, outs = self.init_test_data()
+        self.inputs = {'X': [('x%d' % i, ins[i]) for i in range(len(ins))]}
+        self.outputs = {
+            'Out': [('out%d' % i, outs[i]) for i in range(len(outs))]
+        }
+
+    def get_dtype(self):
+        return "float64"
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['x0'], ['out0'])
+        self.check_grad(['x1'], ['out1'])
+
+    def init_test_data(self):
+        self.shape = self.get_x_shape()
+        ins = []
+        outs = []
+        for i in range(len(self.shape)):
+            ins.append(np.random.random((self.shape[i], )).astype(self.dtype))
+
+        for i in range(len(self.shape)):
+            out_reshape = [1] * len(self.shape)
+            out_reshape[i] = self.shape[i]
+            out_temp = np.reshape(ins[i], out_reshape)
+            outs.append(np.broadcast_to(out_temp, self.shape))
+        return ins, outs
+
+    def get_x_shape(self):
+        return [100, 200]
+
+
+class TestMeshgridOp2(TestMeshgridOp):
+    def get_x_shape(self):
+        return [100, 300]
+
+
+class TestMeshgridOp3(unittest.TestCase):
+    def test_api(self):
+        x = fluid.data(shape=[100], dtype='int32', name='x')
+        y = fluid.data(shape=[200], dtype='int32', name='y')
+
+        input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
+        input_2 = np.random.randint(0, 100, [200, ]).astype('int32')
+
+        out_1 = np.reshape(input_1, [100, 1])
+        out_1 = np.broadcast_to(out_1, [100, 200])
+        out_2 = np.reshape(input_2, [1, 200])
+        out_2 = np.broadcast_to(out_2, [100, 200])
+
+        exe = fluid.Executor(place=fluid.CPUPlace())
+        grid_x, grid_y = paddle.tensor.meshgrid([x, y])
+        res_1, res_2 = exe.run(fluid.default_main_program(),
+                               feed={'x': input_1,
+                                     'y': input_2},
+                               fetch_list=[grid_x, grid_y])
+
+        assert np.array_equal(res_1, out_1)
+        assert np.array_equal(res_2, out_2)
+
+
+class TestMeshgridOp4(unittest.TestCase):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+
+            def test_input_type():
+                x = fluid.data(shape=[200], dtype='float32', name='x2')
+                paddle.tensor.meshgrid(x)
+
+            self.assertRaises(TypeError, test_input_type)
+
+
+class TestMeshgridOp5(unittest.TestCase):
+    def test_api_with_dygraph(self):
+        input_3 = np.random.randint(0, 100, [100, ]).astype('int32')
+        input_4 = np.random.randint(0, 100, [200, ]).astype('int32')
+
+        with fluid.dygraph.guard():
+            tensor_3 = fluid.dygraph.to_variable(input_3)
+            tensor_4 = fluid.dygraph.to_variable(input_4)
+            res_3, res_4 = paddle.tensor.meshgrid([tensor_3, tensor_4])
+
+            assert np.array_equal(res_3.shape, [100, 200])
+            assert np.array_equal(res_4.shape, [100, 200])
+
+
+if __name__ == '__main__':
+    unittest.main()
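[Note: the expected arrays the tests above build by hand coincide with NumPy's own meshgrid under matrix ('ij') indexing, which gives a quick independent cross-check of the reference data:]

    import numpy as np

    x = np.random.rand(3)
    y = np.random.rand(5)
    # same view-then-broadcast recipe the tests use
    gx = np.broadcast_to(x.reshape(3, 1), (3, 5))
    gy = np.broadcast_to(y.reshape(1, 5), (3, 5))
    ref_x, ref_y = np.meshgrid(x, y, indexing='ij')
    assert np.array_equal(gx, ref_x) and np.array_equal(gy, ref_y)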
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index a8025c02227639c6601ced12f96ebedb6ec9b5d2..589754a85d13880ad1d6bbb3037af7f6b86b7988 100644
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -39,7 +39,7 @@ from .creation import full  # DEFINE_ALIAS
 from .creation import full_like  #DEFINE_ALIAS
 from .creation import triu  #DEFINE_ALIAS
 from .creation import tril  #DEFINE_ALIAS
-# from .creation import meshgrid  #DEFINE_ALIAS
+from .creation import meshgrid  #DEFINE_ALIAS
 # from .stat import mean  #DEFINE_ALIAS
 # from .stat import reduce_mean  #DEFINE_ALIAS
 # from .stat import std  #DEFINE_ALIAS
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 232cb6f1b28e936b24c151fff3956391cbd06034..be58a9dd868d3bbc1f23c0e76996a951bb80ce18 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -13,7 +13,7 @@
 # limitations under the License.
 
 from __future__ import print_function
-from ..fluid.framework import Variable
+from ..fluid.framework import Variable, in_dygraph_mode
 from ..fluid.initializer import Constant
 from ..fluid.layers import core
 from ..fluid.layer_helper import LayerHelper
@@ -43,7 +43,7 @@ __all__ = [
     'full_like',
     'triu',
     'tril',
-    #  'meshgrid',
+    'meshgrid',
 ]
@@ -723,3 +723,85 @@ def triu(input, diagonal=0, name=None):
     """
     return _tril_triu_op(LayerHelper('triu', **locals()))
+
+
+def meshgrid(input, name=None):
+    """
+    This op takes a list of N tensors as input, each of which is a 1-dimensional
+    vector, and creates N-dimensional grids.
+
+    Args:
+        input (list of Variable): a list of k 1-D tensors with shapes (N1,),
+            (N2,), ..., (Nk,). Supported data types: ``float64``, ``float32``,
+            ``int32``, ``int64``.
+        name (str, optional): The default value is None. Normally there is no need
+            for user to set this property. For more information, please refer
+            to :ref:`api_guide_Name`.
+
+    Returns:
+        Variable: k tensors. The shape of each tensor is (N1, N2, ..., Nk).
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            import paddle.fluid as fluid
+            import numpy as np
+
+            x = fluid.data(name='x', shape=[100], dtype='int32')
+            y = fluid.data(name='y', shape=[200], dtype='int32')
+
+            input_1 = np.random.randint(0, 100, [100, ]).astype('int32')
+            input_2 = np.random.randint(0, 100, [200, ]).astype('int32')
+
+            exe = fluid.Executor(place=fluid.CPUPlace())
+            grid_x, grid_y = paddle.tensor.meshgrid([x, y])
+            res_1, res_2 = exe.run(fluid.default_main_program(),
+                                   feed={'x': input_1,
+                                         'y': input_2},
+                                   fetch_list=[grid_x, grid_y])
+
+            # the shape of res_1 is (100, 200)
+            # the shape of res_2 is (100, 200)
+
+        .. code-block:: python
+
+            # example 2: in dygraph mode
+
+            import paddle
+            import paddle.fluid as fluid
+            import numpy as np
+
+            input_3 = np.random.randint(0, 100, [100, ]).astype('int32')
+            input_4 = np.random.randint(0, 100, [200, ]).astype('int32')
+            with fluid.dygraph.guard():
+                tensor_3 = fluid.dygraph.to_variable(input_3)
+                tensor_4 = fluid.dygraph.to_variable(input_4)
+                grid_x, grid_y = paddle.tensor.meshgrid([tensor_3, tensor_4])
+
+            # the shape of grid_x is (100, 200)
+            # the shape of grid_y is (100, 200)
+
+    """
+
+    if in_dygraph_mode():
+        num = len(input)
+        out = core.ops.meshgrid(input, num)
+        return out
+
+    helper = LayerHelper('meshgrid', **locals())
+
+    if not isinstance(input, list):
+        raise TypeError("The type of input in meshgrid should be list.")
+
+    for id, input_ in enumerate(input):
+        check_dtype(input_.dtype, 'create data type',
+                    ['float16', 'float32', 'float64', 'int32', 'int64'],
+                    'meshgrid')
+
+    num = len(input)
+    out = [
+        helper.create_variable_for_type_inference(dtype=input[i].dtype)
+        for i in range(num)
+    ]
+    helper.append_op(
+        type='meshgrid', inputs={'X': input}, outputs={'Out': out})
+
+    return out
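[Note: a quick end-to-end sanity check of the new API in dygraph mode, assuming a Paddle build that includes this patch; the results should match NumPy's 'ij'-indexed meshgrid.]

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    a = np.arange(3).astype('int32')
    b = np.arange(4).astype('int32')

    with fluid.dygraph.guard():
        # the patch exports meshgrid both as paddle.tensor.meshgrid
        # and as the top-level alias paddle.meshgrid
        grid_a, grid_b = paddle.meshgrid(
            [fluid.dygraph.to_variable(a), fluid.dygraph.to_variable(b)])
        ref_a, ref_b = np.meshgrid(a, b, indexing='ij')
        assert np.array_equal(grid_a.numpy(), ref_a)
        assert np.array_equal(grid_b.numpy(), ref_b)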