Unverified commit 0d719718, authored by F fwenguang, committed by GitHub

[MLU] add mlu meshgrid kernel (#43271)

Parent 20b38cfa
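This change adds an MLU kernel for the meshgrid op along with a unit test. meshgrid takes between 2 and 6 scalar or 1-D tensors and broadcasts each one to the common N-D grid shape. A minimal NumPy sketch of the expected semantics (the helper name meshgrid_ref is illustrative, not part of the change):

import numpy as np


def meshgrid_ref(*inputs):
    # Each input i is viewed as [1, ..., len(inputs[i]), ..., 1] and then
    # broadcast to the full grid shape; this mirrors the kernel's use of
    # MLUCnnl::BroadcastTo below.
    shape = [x.shape[0] if x.ndim == 1 else 1 for x in inputs]
    outs = []
    for i, x in enumerate(inputs):
        view = [1] * len(inputs)
        view[i] = shape[i]
        outs.append(np.broadcast_to(np.reshape(x, view), shape))
    return outs


# Two 1-D inputs of lengths 3 and 2 yield two [3, 2] grids.
gx, gy = meshgrid_ref(np.array([1, 2, 3]), np.array([10, 20]))
assert gx.shape == (3, 2) and gy.shape == (3, 2)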
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/mlu/mlu_baseop.h"
namespace paddle {
namespace operators {
template <typename T>
class MeshgridMLUKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto ins = ctx.MultiInput<framework::Tensor>("X");
auto outs = ctx.MultiOutput<framework::Tensor>("Out");
PADDLE_ENFORCE_EQ(
(ins.size() > 1) && (ins.size() < 7), true,
platform::errors::InvalidArgument(
"Excepted Tensor numbers between 2 and 6, but only received d% .",
ins.size()));
int64_t size = ins.size();
std::vector<int64_t> shape(size);
for (int64_t i = 0; i < size; i++) {
switch (ins[i]->dims().size()) {
case 0:
shape[i] = 1;
break;
case 1:
shape[i] = ins[i]->dims()[0];
break;
default:
PADDLE_THROW(platform::errors::InvalidArgument(
"Expected scalar or 1D tensor in the tensor list but got tensor "
"%d: ",
i));
}
}
MLUCnnlTensorDesc out_desc(size, shape.data(), ToCnnlDataType<T>());
framework::DDim out_dims = phi::make_ddim(shape);
for (int64_t i = 0; i < size; i++) {
std::vector<int64_t> view_shape(size, 1);
view_shape[i] = shape[i];
outs[i]->Resize(out_dims);
outs[i]->mutable_data<T>(ctx.GetPlace());
MLUCnnlTensorDesc in_desc(size, view_shape.data(), ToCnnlDataType<T>());
MLUCnnl::BroadcastTo(ctx, in_desc.get(), GetBasePtr(ins[i]),
out_desc.get(), GetBasePtr(outs[i]));
}
}
};
} // namespace operators
} // namespace paddle
REGISTER_OP_MLU_KERNEL(
meshgrid, paddle::operators::MeshgridMLUKernel<int>,
paddle::operators::MeshgridMLUKernel<float>,
paddle::operators::MeshgridMLUKernel<int64_t>,
paddle::operators::MeshgridMLUKernel<paddle::platform::float16>);
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import sys
sys.path.append('..')
from op_test import OpTest, skip_check_grad_ci
import paddle.fluid as fluid
import paddle
from paddle.fluid import compiler, Program, program_guard, core
from paddle.fluid.framework import _test_eager_guard
paddle.enable_static()

class TestMeshgridOp(OpTest):

    def setUp(self):
        self.op_type = "meshgrid"
        self.place = paddle.device.MLUPlace(0)
        self.__class__.use_mlu = True
        self.dtype = self.get_dtype()
        ins, outs = self.init_test_data()
        self.inputs = {'X': [('x%d' % i, ins[i]) for i in range(len(ins))]}
        self.outputs = {
            'Out': [('out%d' % i, outs[i]) for i in range(len(outs))]
        }

    def get_dtype(self):
        return "float32"

    def test_check_output(self):
        self.check_output_with_place(self.place)

    def init_test_data(self):
        self.shape = self.get_x_shape()
        ins = []
        outs = []
        for i in range(len(self.shape)):
            ins.append(np.random.random((self.shape[i], )).astype(self.dtype))
        for i in range(len(self.shape)):
            out_reshape = [1] * len(self.shape)
            out_reshape[i] = self.shape[i]
            out_temp = np.reshape(ins[i], out_reshape)
            outs.append(np.broadcast_to(out_temp, self.shape))
        return ins, outs

    def get_x_shape(self):
        return [100, 200]

class TestMeshgridOp2(TestMeshgridOp):

    def get_x_shape(self):
        return [100, 300]

class TestMeshgridOp3(unittest.TestCase):

    def test_api(self):
        x = fluid.data(shape=[100], dtype='int32', name='x')
        y = fluid.data(shape=[200], dtype='int32', name='y')

        input_1 = np.random.randint(0, 100, [100]).astype('int32')
        input_2 = np.random.randint(0, 100, [200]).astype('int32')

        out_1 = np.reshape(input_1, [100, 1])
        out_1 = np.broadcast_to(out_1, [100, 200])
        out_2 = np.reshape(input_2, [1, 200])
        out_2 = np.broadcast_to(out_2, [100, 200])

        exe = fluid.Executor(place=fluid.MLUPlace(0))
        grid_x, grid_y = paddle.tensor.meshgrid(x, y)
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={'x': input_1, 'y': input_2},
                               fetch_list=[grid_x, grid_y])

        assert np.array_equal(res_1, out_1)
        assert np.array_equal(res_2, out_2)

class TestMeshgridOp4(unittest.TestCase):

    def test_list_input(self):
        x = fluid.data(shape=[100], dtype='int32', name='x')
        y = fluid.data(shape=[200], dtype='int32', name='y')

        input_1 = np.random.randint(0, 100, [100]).astype('int32')
        input_2 = np.random.randint(0, 100, [200]).astype('int32')

        out_1 = np.reshape(input_1, [100, 1])
        out_1 = np.broadcast_to(out_1, [100, 200])
        out_2 = np.reshape(input_2, [1, 200])
        out_2 = np.broadcast_to(out_2, [100, 200])

        exe = fluid.Executor(place=fluid.MLUPlace(0))
        grid_x, grid_y = paddle.tensor.meshgrid([x, y])
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={'x': input_1, 'y': input_2},
                               fetch_list=[grid_x, grid_y])

        assert np.array_equal(res_1, out_1)
        assert np.array_equal(res_2, out_2)

class TestMeshgridOp5(unittest.TestCase):

    def test_tuple_input(self):
        x = fluid.data(shape=[100], dtype='int32', name='x')
        y = fluid.data(shape=[200], dtype='int32', name='y')

        input_1 = np.random.randint(0, 100, [100]).astype('int32')
        input_2 = np.random.randint(0, 100, [200]).astype('int32')

        out_1 = np.reshape(input_1, [100, 1])
        out_1 = np.broadcast_to(out_1, [100, 200])
        out_2 = np.reshape(input_2, [1, 200])
        out_2 = np.broadcast_to(out_2, [100, 200])

        exe = fluid.Executor(place=fluid.MLUPlace(0))
        grid_x, grid_y = paddle.tensor.meshgrid((x, y))
        res_1, res_2 = exe.run(fluid.default_main_program(),
                               feed={'x': input_1, 'y': input_2},
                               fetch_list=[grid_x, grid_y])

        assert np.array_equal(res_1, out_1)
        assert np.array_equal(res_2, out_2)

class TestMeshgridOp7(unittest.TestCase):

    def test_api_with_dygraph_list_input(self):
        input_3 = np.random.randint(0, 100, [100]).astype('int32')
        input_4 = np.random.randint(0, 100, [200]).astype('int32')

        with fluid.dygraph.guard():
            tensor_3 = fluid.dygraph.to_variable(input_3)
            tensor_4 = fluid.dygraph.to_variable(input_4)
            res_3, res_4 = paddle.tensor.meshgrid([tensor_3, tensor_4])

            assert np.array_equal(res_3.shape, [100, 200])
            assert np.array_equal(res_4.shape, [100, 200])

class TestMeshgridOp8(unittest.TestCase):

    def test_api_with_dygraph_tuple_input(self):
        input_3 = np.random.randint(0, 100, [100]).astype('int32')
        input_4 = np.random.randint(0, 100, [200]).astype('int32')

        with fluid.dygraph.guard():
            tensor_3 = fluid.dygraph.to_variable(input_3)
            tensor_4 = fluid.dygraph.to_variable(input_4)
            res_3, res_4 = paddle.tensor.meshgrid((tensor_3, tensor_4))

            assert np.array_equal(res_3.shape, [100, 200])
            assert np.array_equal(res_4.shape, [100, 200])

if __name__ == '__main__':
    unittest.main()