Unverified · Commit cbd15f7d authored by Qi Li, committed by GitHub

[NPU] add kernels for elementwise_add gather_nd tile, test=develop (#36464)

Parent: 8757fc5b
@@ -146,6 +146,9 @@ namespace ops = paddle::operators;
 namespace plat = paddle::platform;

 REGISTER_OP_NPU_KERNEL(elementwise_add, ops::ElementwiseAddNPUKernel<float>,
+#ifdef PADDLE_WITH_ASCEND_INT64
+                       ops::ElementwiseAddNPUKernel<int64_t>,
+#endif
                        ops::ElementwiseAddNPUKernel<plat::float16>);
 REGISTER_OP_NPU_KERNEL(elementwise_add_grad,
...
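With this registration, elementwise_add dispatches to an int64 NPU kernel only when Paddle is built with PADDLE_WITH_ASCEND_INT64. A minimal sketch of the user-level behaviour, assuming an Ascend NPU build with that flag enabled (device name and availability are assumptions, not part of this diff):

```python
import numpy as np
import paddle

paddle.set_device("npu:0")  # assumes a PaddlePaddle build with Ascend NPU support

x = paddle.to_tensor(np.array([2, 3, 4], dtype="int64"))
y = paddle.to_tensor(np.array([1, 5, 2], dtype="int64"))
print(paddle.add(x, y).numpy())  # expected: [3 8 6]
```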
@@ -18,7 +18,10 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

-template <typename DeviceContext, typename T>
+using Tensor = framework::Tensor;
+using NPUDeviceContext = platform::NPUDeviceContext;
+
+template <typename T>
 class GatherNdNPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
@@ -49,14 +52,12 @@ class GatherNdNPUKernel : public framework::OpKernel<T> {
                           framework::proto::VarType::INT64)));

     const auto &runner = NpuOpRunner("GatherNd", {*x, *index}, {*out}, {});
-    auto stream =
-        ctx.template device_context<paddle::platform::NPUDeviceContext>()
-            .stream();
+    auto stream = ctx.template device_context<NPUDeviceContext>().stream();
     runner.Run(stream);
   }
 };

-template <typename DeviceContext, typename T>
+template <typename T>
 class GatherNdGradNPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext &ctx) const override {
@@ -91,10 +92,7 @@ class GatherNdGradNPUKernel : public framework::OpKernel<T> {
       dout = &tmp_tensor2;
     }

-    auto stream =
-        ctx.template device_context<paddle::platform::NPUDeviceContext>()
-            .stream();
+    auto stream = ctx.template device_context<NPUDeviceContext>().stream();
     platform::NPUMemsetAsync(static_cast<void *>(p), 0, dx->numel() * sizeof(T),
                              stream);
@@ -108,13 +106,13 @@ class GatherNdGradNPUKernel : public framework::OpKernel<T> {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_NPU_KERNEL(
-    gather_nd, ops::GatherNdNPUKernel<paddle::platform::NPUDeviceContext,
-                                      paddle::platform::float16>,
-    ops::GatherNdNPUKernel<paddle::platform::NPUDeviceContext, float>);
+REGISTER_OP_NPU_KERNEL(gather_nd,
+                       ops::GatherNdNPUKernel<paddle::platform::float16>,
+#ifdef PADDLE_WITH_ASCEND_INT64
+                       ops::GatherNdNPUKernel<int64_t>,
+#endif
+                       ops::GatherNdNPUKernel<float>);

-REGISTER_OP_NPU_KERNEL(
-    gather_nd_grad,
-    ops::GatherNdGradNPUKernel<paddle::platform::NPUDeviceContext,
-                               paddle::platform::float16>,
-    ops::GatherNdGradNPUKernel<paddle::platform::NPUDeviceContext, float>);
+REGISTER_OP_NPU_KERNEL(gather_nd_grad,
+                       ops::GatherNdGradNPUKernel<paddle::platform::float16>,
+                       ops::GatherNdGradNPUKernel<float>);
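Both gather_nd kernels drop the unused DeviceContext template parameter and fetch the stream through the new NPUDeviceContext alias; registration covers float16 and float, plus int64 behind PADDLE_WITH_ASCEND_INT64. A minimal user-level sketch, assuming the same NPU build as above:

```python
import numpy as np
import paddle

paddle.set_device("npu:0")  # assumes a PaddlePaddle build with Ascend NPU support

x = paddle.to_tensor(np.arange(12, dtype="float32").reshape(3, 4))
index = paddle.to_tensor(np.array([[0, 1], [2, 3]], dtype="int32"))
# gathers x[0, 1] and x[2, 3]
print(paddle.gather_nd(x, index).numpy())  # expected: [ 1. 11.]
```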
@@ -16,7 +16,11 @@ limitations under the License. */
 namespace paddle {
 namespace operators {

-template <typename DeviceContext, typename T>
+using Tensor = framework::Tensor;
+using NPUDeviceContext = platform::NPUDeviceContext;
+
+template <typename T>
 class TileNPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& context) const override {
@@ -92,18 +96,21 @@ class TileNPUKernel : public framework::OpKernel<T> {
     std::vector<int> temp(repeat_times.size(), 1);
     if (repeat_times == temp) {
-      framework::TensorCopy(
-          *in0, context.GetPlace(),
-          context.template device_context<platform::DeviceContext>(), out0);
+      framework::TensorCopy(*in0, context.GetPlace(),
+                            context.template device_context<NPUDeviceContext>(),
+                            out0);
       return;
     }

-    const auto& runner =
-        NpuOpRunner("TileD", {*in0}, {*out0}, {{"multiples", repeat_times}});
-    auto stream =
-        context.template device_context<paddle::platform::NPUDeviceContext>()
-            .stream();
-    runner.Run(stream);
+    // const auto& runner =
+    //     NpuOpRunner("TileD", {*in0}, {*out0}, {{"multiples", repeat_times}});
+    auto stream = context.template device_context<NPUDeviceContext>().stream();
+
+    NpuOpRunner runner;
+    runner.SetType("Tile")
+        .AddInput(*in0)
+        .AddInput(std::move(repeat_times))
+        .AddOutput(*out0)
+        .Run(stream);
   }
 };
@@ -111,8 +118,9 @@ class TileNPUKernel : public framework::OpKernel<T> {
 }  // namespace paddle

 namespace ops = paddle::operators;
-REGISTER_OP_NPU_KERNEL(
-    tile, ops::TileNPUKernel<paddle::platform::NPUDeviceContext, float>,
-    ops::TileNPUKernel<paddle::platform::NPUDeviceContext, int>,
-    ops::TileNPUKernel<paddle::platform::NPUDeviceContext,
-                       paddle::platform::float16>);
+REGISTER_OP_NPU_KERNEL(tile, ops::TileNPUKernel<float>, ops::TileNPUKernel<int>,
+#ifdef PADDLE_WITH_ASCEND_INT64
+                       ops::TileNPUKernel<int64_t>,
+#endif
+                       ops::TileNPUKernel<bool>,
+                       ops::TileNPUKernel<paddle::platform::float16>);
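The tile kernel now builds its runner with SetType("Tile") and feeds repeat_times in as an op input, rather than calling the attribute-based TileD with a "multiples" attribute. A minimal user-level sketch, assuming an NPU build; the tensor-valued repeat_times below is an illustration of paddle.tile's API, not something this diff changes:

```python
import numpy as np
import paddle

paddle.set_device("npu:0")  # assumes a PaddlePaddle build with Ascend NPU support

x = paddle.to_tensor(np.ones((2, 4, 5), dtype="float32"))
# repeat_times may be a list/tuple or a tensor
repeats = paddle.to_tensor(np.array([2, 1, 4], dtype="int32"))
print(paddle.tile(x, repeat_times=repeats).shape)  # expected: [4, 4, 20]
```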
@@ -65,7 +65,7 @@ class TestElementwiseAddOp(OpTest):
         self.check_output_with_place(self.place)

     def test_check_grad_normal(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or self.dtype == np.int64:
             return
         self.check_grad_with_place(
@@ -75,7 +75,7 @@ class TestElementwiseAddOp(OpTest):
             max_relative_error=0.006, )

     def test_check_grad_ingore_x(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or self.dtype == np.int64:
             return
         self.check_grad_with_place(
@@ -86,7 +86,7 @@ class TestElementwiseAddOp(OpTest):
             max_relative_error=0.006, )

     def test_check_grad_ingore_y(self):
-        if self.dtype == np.float16:
+        if self.dtype == np.float16 or self.dtype == np.int64:
             return
         self.check_grad_with_place(
@@ -102,6 +102,11 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
         self.dtype = np.float16


+class TestINT64ElementwiseAddOp(TestElementwiseAddOp):
+    def init_dtype(self):
+        self.dtype = np.int64
+
+
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast.")
 class TestElementwiseAddOp_scalar(TestElementwiseAddOp):
@@ -507,8 +512,8 @@ class TestAddApi(unittest.TestCase):
     def test_dygraph(self):
         with fluid.dygraph.guard(paddle.NPUPlace(0)):
-            np_x = np.array([2, 3, 4]).astype('float64')
-            np_y = np.array([1, 5, 2]).astype('float64')
+            np_x = np.array([2, 3, 4]).astype('float32')
+            np_y = np.array([1, 5, 2]).astype('float32')
             x = fluid.dygraph.to_variable(np_x)
             y = fluid.dygraph.to_variable(np_y)
             z = self._executed_api(x, y)
...
@@ -61,7 +61,7 @@ def test_class1(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(self.place, ['X'], 'Out')
@@ -88,7 +88,7 @@ def test_class2(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(self.place, ['X'], 'Out')
@@ -120,7 +120,7 @@ def test_class3(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(
@@ -153,7 +153,7 @@ def test_class4(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(self.place, ['X'], 'Out')
@@ -184,7 +184,7 @@ def test_class5(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(self.place, ['X'], 'Out')
@@ -217,7 +217,7 @@ def test_class6(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(self.place, ['X'], 'Out')
@@ -252,7 +252,7 @@ def test_class7(op_type, typename):
         self.check_output_with_place(self.place)

     def test_check_grad(self):
-        if typename == "float16":
+        if typename == "float16" or typename == "int64":
             self.__class__.no_need_check_grad = True
         else:
             self.check_grad_with_place(self.place, ['X'], 'Out')
@@ -276,7 +276,7 @@ class TestGatherNdAPI(unittest.TestCase):
 paddle.enable_static()

-for _typename in {'float16', 'float32'}:
+for _typename in {'float16', 'float32', 'int64'}:
     test_class1('gather_nd', _typename)
     test_class2('gather_nd', _typename)
     test_class3('gather_nd', _typename)
...
@@ -206,7 +206,7 @@ class TestTileOpInt64_t(OpTest):
         self.op_type = "tile"
         self.inputs = {
             'X': np.random.randint(
-                10, size=(2, 4, 5)).astype("int32")
+                10, size=(2, 4, 5)).astype("int64")
         }
         self.attrs = {'repeat_times': [2, 1, 4]}
         output = np.tile(self.inputs['X'], (2, 1, 4))
@@ -219,6 +219,24 @@ class TestTileOpInt64_t(OpTest):
         self.check_output_with_place(self.place)


+# Situation 6: input x is Bool
+class TestTileOpBool(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.place = paddle.NPUPlace(0)
+        self.op_type = "tile"
+        self.inputs = {'X': np.random.randint(1, size=(2, 4, 5)).astype("bool")}
+        self.attrs = {'repeat_times': [2, 1, 4]}
+        output = np.tile(self.inputs['X'], (2, 1, 4))
+        self.outputs = {'Out': output}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+
 # Test python API
 class TestTileAPI(unittest.TestCase):
     def test_api(self):
...
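The new TestTileOpBool exercises the freshly registered bool kernel. A matching user-level sketch, assuming the same NPU build as in the earlier examples:

```python
import numpy as np
import paddle

paddle.set_device("npu:0")  # assumes a PaddlePaddle build with Ascend NPU support

x = paddle.to_tensor(np.random.randint(0, 2, size=(2, 4, 5)).astype("bool"))
out = paddle.tile(x, repeat_times=[2, 1, 4])
print(out.shape, out.dtype)  # expected: [4, 4, 20] paddle.bool
```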