Unverified commit a5fcc4b5, authored by TTerror, committed by GitHub

update reduce_sum op on xpu (#29367)

* update reduce_sum op on xpu

* update reduce_sum op on xpu

* support running on xpu
Parent c7cada85
@@ -16,6 +16,8 @@
 #include "paddle/fluid/operators/reduce_ops/reduce_sum_op.h"
 #include <memory>
 #include <string>
+#include "paddle/fluid/platform/xpu_header.h"

 namespace paddle {
 namespace operators {

@@ -27,87 +29,121 @@ class ReduceSumXPUKernel : public framework::OpKernel<T> {
         platform::is_xpu_place(context.GetPlace()), true,
         platform::errors::Unavailable("This kernel only runs on XPU."));
     bool reduce_all = context.Attr<bool>("reduce_all");
-    auto* input = context.Input<Tensor>("X");
-    auto* output = context.Output<Tensor>("Out");
-    output->mutable_data<T>(context.GetPlace());
+    auto dims = context.Attr<std::vector<int>>("dim");
+    auto* x = context.Input<Tensor>("X");
+    auto* y = context.Output<Tensor>("Out");
+    y->mutable_data<T>(context.GetPlace());
     auto& dev_ctx = context.template device_context<DeviceContext>();
+
+    int out_dtype = context.Attr<int>("out_dtype");
+    PADDLE_ENFORCE_EQ(
+        out_dtype == -1, true,
+        platform::errors::InvalidArgument(
+            "XPU only support out_dtype == -1 in reduce_sum op."));
+
+    const auto* x_data = x->data<T>();
+    auto* y_data = y->data<T>();
+    const auto& input_dim_size = x->dims().size();
+    std::vector<int> true_dims;
+    for (size_t i = 0; i < dims.size(); ++i) {
+      if (dims[i] < 0) {
+        true_dims.push_back(dims[i] + input_dim_size);
+      } else {
+        true_dims.push_back(dims[i]);
+      }
+    }
+
+    std::vector<int> reduce_dims;
+    std::vector<int> xdims((input_dim_size));
+    for (int i = 0; i < input_dim_size; ++i) {
+      xdims[i] = x->dims()[i];
+    }
     if (reduce_all) {
-      int input_len = input->numel();
-      int r = xpu::sum(dev_ctx.x_context(), input->data<T>(), output->data<T>(),
-                       input_len);
-      PADDLE_ENFORCE_EQ(r == xpu::Error_t::SUCCESS, true,
-                        platform::errors::External("XPU kernel error!"));
+      for (int i = 0; i < input_dim_size; ++i) {
+        reduce_dims.push_back(i);
+      }
     } else {
-      int ndim = input->dims().size();
-      std::vector<int> idims;
-      for (int i = 0; i < input->dims().size(); i++) {
-        idims.push_back(input->dims()[i]);
-      }
-      auto dims = context.Attr<std::vector<int>>("dim");
-      int rdim = dims.size();
-      int r =
-          xpu::reduce(dev_ctx.x_context(), input->data<T>(), output->data<T>(),
-                      idims.data(), ndim, dims.data(), rdim, xpu::REDUCE_SUM);
-      PADDLE_ENFORCE_EQ(r == xpu::Error_t::SUCCESS, true,
-                        platform::errors::External("XPU kernel error!"));
+      std::set<int> dims_set(true_dims.begin(), true_dims.end());
+      for (auto i = 0; i < input_dim_size; i++) {
+        if (dims_set.find(i) != dims_set.end()) {
+          if (x->dims()[i] != 1) {
+            reduce_dims.push_back(i);
+          }
+        }
+      }
+    }
+
+    if (reduce_dims.size() == 0) {
+      int r = xpu::copy<T>(dev_ctx.x_context(), x_data, y_data,
+                           x->numel() * sizeof(T));
+      PADDLE_ENFORCE_EQ(
+          r == xpu::Error_t::SUCCESS, true,
+          platform::errors::External("XPU copy in reduce_sum op return "
+                                     "wrong value[%d %s].",
+                                     r, XPUAPIErrorMsg[r]));
+    } else {
+      int r = xpu::reduce_sum<T>(dev_ctx.x_context(), x_data, y_data, xdims,
+                                 reduce_dims);
+      PADDLE_ENFORCE_EQ(
+          r == xpu::Error_t::SUCCESS, true,
+          platform::errors::External("XPU reduce_sum in reduce_sum op return"
+                                     " wrong value[%d %s].",
+                                     r, XPUAPIErrorMsg[r]));
     }
   }
 };
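The rewritten forward kernel normalizes negative axes, expands reduce_all into an explicit axis list, skips axes whose extent is already 1, and falls back to a plain copy when no axis is left to reduce. The NumPy sketch below mirrors that selection logic only as an illustration; the helper name normalized_reduce_dims is made up for this example and is not part of Paddle.

import numpy as np

# Illustrative helper (not Paddle code): reproduce the reduce-axis selection
# the XPU kernel performs before calling xpu::reduce_sum.
def normalized_reduce_dims(shape, dims, reduce_all):
    ndim = len(shape)
    if reduce_all:
        return list(range(ndim))
    true_dims = [d + ndim if d < 0 else d for d in dims]  # map negative axes
    # axes of extent 1 are dropped; summing over them changes nothing
    return [d for d in sorted(set(true_dims)) if shape[d] != 1]

x = np.random.random((5, 1, 7)).astype("float32")
rdims = normalized_reduce_dims(x.shape, [-2, 2], reduce_all=False)  # -> [2]
# an empty axis list corresponds to the xpu::copy branch in the kernel
y = x.sum(axis=tuple(rdims)) if rdims else x.copy()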
 template <typename DeviceContext, typename T>
 class ReduceSumGradXPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
     auto dims = context.Attr<std::vector<int>>("dim");
     bool reduce_all = context.Attr<bool>("reduce_all");
-    auto* input0 = context.Input<Tensor>("X");
-    auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
-    auto* output = context.Output<Tensor>(framework::GradVarName("X"));
-    output->mutable_data<T>(context.GetPlace());
-    const auto* input2_d = input2->data<T>();
-    auto* output_d = output->data<T>();
-    auto& dev_ctx = context.template device_context<DeviceContext>();
-    int r = 0;
-    std::vector<int> idims;
-    int reduce_dim = 0;
-    if (reduce_all) {
-      idims.push_back(input0->numel());
-      idims.push_back(1);
-      idims.push_back(1);
-      r = xpu::reduce_grad(dev_ctx.x_context(), input2_d, output_d,
-                           idims.data(), idims.size(), &reduce_dim, 1,
-                           xpu::REDUCE_SUM);
-      PADDLE_ENFORCE_EQ(r == xpu::Error_t::SUCCESS, true,
-                        platform::errors::External("XPU kernel error!"));
-    } else if (dims.size() == 1) {
-      // handle reduce by one dimension
-      int reduce_dim_index = dims[0];
-      if (reduce_dim_index < 0) {
-        reduce_dim_index += input0->dims().size();
-      }
-      auto& input_dim = input0->dims();
-      int before_dim = 1;
-      for (int i = 0; i < reduce_dim_index; ++i) {
-        before_dim *= input_dim[i];
-      }
-      int reduce_dim = input_dim[reduce_dim_index];
-      int after_dim = 1;
-      for (int i = reduce_dim_index + 1; i < input_dim.size(); ++i) {
-        after_dim *= input_dim[i];
-      }
-      idims.push_back(before_dim);
-      idims.push_back(input_dim[reduce_dim_index]);
-      idims.push_back(after_dim);
-      reduce_dim = 1;
-      r = xpu::reduce_grad(dev_ctx.x_context(), input2_d, output_d,
-                           idims.data(), idims.size(), &reduce_dim, 1,
-                           xpu::REDUCE_SUM);
-      PADDLE_ENFORCE_EQ(r == xpu::Error_t::SUCCESS, true,
-                        platform::errors::External("XPU kernel error!"));
-    } else {
-      PADDLE_THROW(
-          platform::errors::Unimplemented("unsupport reduce sum grad"));
-    }
+    auto* x = context.Input<Tensor>("X");
+    auto* out = context.Input<Tensor>(framework::GradVarName("Out"));
+    auto* x_grad = context.Output<Tensor>(framework::GradVarName("X"));
+
+    int in_dtype = context.Attr<int>("in_dtype");
+    PADDLE_ENFORCE_EQ(
+        in_dtype == -1, true,
+        platform::errors::InvalidArgument(
+            "XPU only support in_dtype == -1 in reduce_sum_grad op."));
+
+    auto& dev_ctx = context.template device_context<DeviceContext>();
+    x_grad->mutable_data<T>(context.GetPlace());
+    const auto* out_data = out->data<T>();
+    auto* x_grad_data = x_grad->data<T>();
+
+    const auto& input_dim_size = x->dims().size();
+    std::vector<int> true_dims;
+    for (size_t i = 0; i < dims.size(); ++i) {
+      if (dims[i] < 0) {
+        true_dims.push_back(dims[i] + input_dim_size);
+      } else {
+        true_dims.push_back(dims[i]);
+      }
+    }
+
+    std::vector<int> ydims(input_dim_size);
+    std::vector<int> xdims((input_dim_size));
+    std::set<int> dims_set(true_dims.begin(), true_dims.end());
+    for (auto i = 0; i < input_dim_size; i++) {
+      xdims[i] = x->dims()[i];
+      if (dims_set.find(i) != dims_set.end() || reduce_all) {
+        ydims[i] = 1;
+      } else {
+        ydims[i] = x->dims()[i];
+      }
+    }
+
+    int r = xpu::broadcast<T>(dev_ctx.x_context(), out_data, x_grad_data, ydims,
+                              xdims);
+    PADDLE_ENFORCE_EQ(
+        r == xpu::Error_t::SUCCESS, true,
+        platform::errors::External("XPU broadcast in reduce_sum_grad op return"
+                                   " wrong value[%d %s].",
+                                   r, XPUAPIErrorMsg[r]));
   }
 };

 }  // namespace operators
......
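The gradient path no longer dispatches on how many axes were reduced: since the derivative of a sum with respect to each summed element is 1, Out@GRAD is simply broadcast from the reduced shape (ydims) back to the input shape (xdims) with a single xpu::broadcast call. A rough NumPy analogue of that idea, offered only as an illustration:

import numpy as np

# Sketch of the reduce_sum gradient: broadcast the upstream gradient back
# over the reduced axes (variable names echo the C++ xdims/ydims).
xdims = (5, 6, 10)
reduce_axes = {1}
ydims = tuple(1 if i in reduce_axes else d for i, d in enumerate(xdims))  # (5, 1, 10)

dout = np.random.random(ydims).astype("float32")  # gradient of Out, keep_dim layout
dx = np.broadcast_to(dout, xdims)                 # every x element receives its slice's dOut
assert dx.shape == xdims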
@@ -18,7 +18,8 @@ import unittest
 import numpy as np
 import sys
 sys.path.append("..")
-from op_test import OpTest, skip_check_grad_ci
+from op_test_xpu import OpTest, XPUOpTest
+from op_test import skip_check_grad_ci
 import paddle
 import paddle.fluid.core as core
 import paddle.fluid as fluid
@@ -26,180 +27,128 @@ from paddle.fluid import compiler, Program, program_guard
 from paddle.fluid.framework import convert_np_dtype_to_dtype_
-class TestSumOp(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
-        self.attrs = {'use_xpu': True}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def check_grad_(self):
-        self.check_grad(['X'], 'Out')
+class TestXPUReduceSumOp(XPUOpTest):
+    def setUp(self):
+        self.init_op_type()
+        self.initTestCase()
+        self.use_xpu = True
+        self.use_mkldnn = False
+        self.attrs = {
+            'dim': self.axis,
+            'keep_dim': self.keep_dim,
+            'reduce_all': self.reduce_all
+        }
+        self.inputs = {'X': np.random.random(self.shape).astype("float32")}
+        if self.attrs['reduce_all']:
+            self.outputs = {'Out': self.inputs['X'].sum()}
+        else:
+            self.outputs = {
+                'Out': self.inputs['X'].sum(axis=self.axis,
+                                            keepdims=self.attrs['keep_dim'])
+            }
+
+    def test_check_output(self):
+        if paddle.is_compiled_with_xpu():
+            paddle.enable_static()
+            place = paddle.XPUPlace(0)
+            self.check_output_with_place(place)
+
+    def test_check_grad(self):
+        if paddle.is_compiled_with_xpu():
+            paddle.enable_static()
+            place = paddle.XPUPlace(0)
+            self.check_grad_with_place(place, ['X'], 'Out')
+
+    def init_op_type(self):
+        self.op_type = "reduce_sum"
+        self.use_mkldnn = False
+        self.keep_dim = False
+        self.reduce_all = False
+
+    def initTestCase(self):
+        self.shape = (5, 6, 10)
+        self.axis = (0, )

-class TestSumOp5D(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {
-            'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
-        }
-        self.attrs = {'use_xpu': True}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+class TestSumOp5D(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (1, 2, 5, 6, 10)
+        self.axis = (0, )

-class TestSumOp6D(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {
-            'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
-        }
-        self.attrs = {'use_xpu': True}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+class TestSumOp6D(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (1, 1, 2, 5, 6, 10)
+        self.axis = (0, )

-class TestSumOp8D(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {
-            'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
-        }
-        self.attrs = {'dim': (0, 3), 'use_xpu': True}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+class TestSumOp8D(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (1, 3, 1, 2, 1, 4, 3, 10)
+        self.axis = (0, 3)

-class Test1DReduce(OpTest):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {'X': np.random.random(120).astype("float64")}
-        self.attrs = {'use_xpu': True}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-
-    def test_check_output(self):
-        if paddle.is_compiled_with_xpu():
-            place = paddle.XPUPlace(0)
-            self.check_output_with_place(place)
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+class Test1DReduce(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = 120
+        self.axis = (0, )

-class Test2DReduce0(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.attrs = {'dim': [0], 'use_xpu': True}
-        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
-        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+class Test2DReduce0(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (20, 10)
+        self.axis = (0, )

-class Test2DReduce1(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.attrs = {'dim': [1], 'use_xpu': True}
-        self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
+class Test2DReduce1(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (20, 10)
+        self.axis = (1, )

-class Test3DReduce0(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.attrs = {'dim': [1], 'use_xpu': True}
-        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
+class Test3DReduce0(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (5, 6, 7)
+        self.axis = (1, )

-class Test3DReduce1(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.attrs = {'dim': [2], 'use_xpu': True}
-        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
+class Test3DReduce1(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (5, 6, 7)
+        self.axis = (2, )

-class Test3DReduce2(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.attrs = {'dim': [-2], 'use_xpu': True}
-        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
+class Test3DReduce2(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (5, 6, 7)
+        self.axis = (-2, )

-class Test3DReduce3(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.attrs = {'dim': [1, 2], 'use_xpu': True}
-        self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
-        }
+class Test3DReduce3(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (5, 6, 7)
+        self.axis = (1, 2)

-class TestKeepDimReduce(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
-        self.attrs = {'dim': [1], 'keep_dim': True, 'use_xpu': True}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
-                                        keepdims=self.attrs['keep_dim'])
-        }
+class TestKeepDimReduce(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (5, 6, 10)
+        self.axis = (1, )
+        self.keep_dim = True

-class TestKeepDim8DReduce(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {
-            'X': np.random.random((2, 5, 3, 2, 2, 3, 4, 2)).astype("float64")
-        }
-        self.attrs = {'dim': (3, 4, 5), 'keep_dim': True, 'use_xpu': True}
-        self.outputs = {
-            'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']),
-                                        keepdims=self.attrs['keep_dim'])
-        }
+class TestKeepDim8DReduce(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (2, 5, 3, 2, 2, 3, 4, 2)
+        self.axis = (3, 4, 5)
+        self.keep_dim = True

-class TestReduceAll(Test1DReduce):
-    def setUp(self):
-        self.op_type = "reduce_sum"
-        self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")}
-        self.attrs = {'reduce_all': True, 'use_xpu': True}
-        self.outputs = {'Out': self.inputs['X'].sum()}
+class TestReduceAll(TestXPUReduceSumOp):
+    def initTestCase(self):
+        self.shape = (5, 6, 2, 10)
+        self.axis = (0, )
+        self.reduce_all = True

 if __name__ == '__main__':
......
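Beyond the op tests above, the kernel can also be exercised directly from the Python API. A minimal sketch, assuming a Paddle build with XPU support so that the 'xpu' device is available; paddle.sum lowers to the reduce_sum op that this commit rewrites:

import numpy as np
import paddle

paddle.set_device('xpu')  # assumes an XPU-enabled build; use 'cpu' otherwise
x_np = np.random.random((5, 6, 10)).astype('float32')
x = paddle.to_tensor(x_np)
out = paddle.sum(x, axis=1, keepdim=True)  # dispatches to the reduce_sum kernel
np.testing.assert_allclose(out.numpy(), x_np.sum(axis=1, keepdims=True), rtol=1e-5)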