diff --git a/paddle/fluid/operators/lookup_table_v2_op_npu.cc b/paddle/fluid/operators/lookup_table_v2_op_npu.cc
index 686ffc98de77f9ca3f6761222268df33d7a232a8..2a8f47462345188c3870ca07119fe7687a1ebe9f 100644
--- a/paddle/fluid/operators/lookup_table_v2_op_npu.cc
+++ b/paddle/fluid/operators/lookup_table_v2_op_npu.cc
@@ -65,17 +65,31 @@ class LookupTableV2GradNPUKernel : public framework::OpKernel<T> {
         ctx.template device_context<paddle::platform::NPUDeviceContext>()
             .stream();
-    const auto &runner_zeros =
-        NpuOpRunner("ZerosLike", {*table_grad_t}, {*table_grad_t});
-    runner_zeros.Run(stream);
-
-    // NOTE(zhiqiu): It seems in cann 20.1, the first input and output
-    // can be different tensor, but in cann 20.2+, it does inplace operation.
-    // Thus, the first input and output should be same tensor.
-    const auto &runner_scatter =
-        NpuOpRunner("ScatterAdd", {*table_grad_t, *ids_t, *output_grad_t},
-                    {*table_grad_t}, {{"use_locking", true}});
-    runner_scatter.Run(stream);
+    int embedding_dim = table_grad_t->dims()[1];
+
+    if (embedding_dim % 32 == 0) {
+      // NOTE(pangyoki): The embedding_dim of Tensor used in
+      // EmbeddingDenseGrad must be an integer multiple of 32.
+      int num_weights = table_grad_t->dims()[0];
+      const auto &runner =
+          NpuOpRunner("EmbeddingDenseGrad", {*output_grad_t, *ids_t},
+                      {*table_grad_t}, {{"num_weights", num_weights},
+                                        {"padding_idx", -1},
+                                        {"scale_grad_by_freq", false}});
+      runner.Run(stream);
+    } else {
+      const auto &runner_zeros =
+          NpuOpRunner("ZerosLike", {*table_grad_t}, {*table_grad_t});
+      runner_zeros.Run(stream);
+
+      // NOTE(zhiqiu): It seems in cann 20.1, the first input and output
+      // can be different tensor, but in cann 20.2+, it does inplace operation.
+      // Thus, the first input and output should be same tensor.
+      const auto &runner_scatter =
+          NpuOpRunner("ScatterAdd", {*table_grad_t, *ids_t, *output_grad_t},
+                      {*table_grad_t}, {{"use_locking", true}});
+      runner_scatter.Run(stream);
+    }
   }
 };
 
 }  // namespace operators
diff --git a/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py
index 2463ddb7137acd683fde3ce2c5d09341a5c4a4d2..41fe0636bd7790433dee33dd358ec7ed6d7ae9e5 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_lookup_table_v2_op_npu.py
@@ -35,14 +35,14 @@ class TestLookupTableV2(OpTest):
         self.place = paddle.NPUPlace(0)
         self.init_dtype()
+        self.init_dim()
 
         np.random.seed(SEED)
         bsz = 6
         seqlen = 8
         vocab = 10
-        dim = 20
-        w = np.ones([vocab, dim]).astype(self.dtype)
+        w = np.ones([vocab, self.dim]).astype(self.dtype)
         x = np.random.randint(0, vocab, size=(bsz, seqlen)).astype(np.int32)
-        out = np.ones([bsz, seqlen, dim]).astype(self.dtype)
+        out = np.ones([bsz, seqlen, self.dim]).astype(self.dtype)
 
         self.inputs = {
             'W': OpTest.np_dtype_to_fluid_dtype(w),
@@ -62,6 +62,10 @@ class TestLookupTableV2(OpTest):
     def init_dtype(self):
         self.dtype = np.float32
 
+    def init_dim(self):
+        # embedding_dim is not a multiple of 32
+        self.dim = 20
+
     def test_check_output(self):
         self.check_output_with_place(self.place, check_dygraph=False)
 
@@ -85,5 +89,29 @@ class TestLookupTableV2FP16(TestLookupTableV2):
         self.__class__.no_need_check_grad = True
 
 
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestLookupTableV2Dim32(TestLookupTableV2):
+    def init_dim(self):
+        # embedding_dim is a multiple of 32
+        self.dim = 64
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestLookupTableV2Dim32FP16(TestLookupTableV2):
+    no_need_check_grad = True
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+    def init_dim(self):
+        self.dim = 64
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+        self.__class__.no_need_check_grad = True
+
+
 if __name__ == '__main__':
     unittest.main()