diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt
index 4797b0e7154e0c7f425d40bef78ebcfcb4081b1f..78509b145280ea45a0ed09cb4e56b8d06356b3c6 100644
--- a/paddle/fluid/operators/CMakeLists.txt
+++ b/paddle/fluid/operators/CMakeLists.txt
@@ -156,12 +156,11 @@ cc_library(tensor_formatter SRCS tensor_formatter.cc DEPS ${OP_HEADER_DEPS})
 if (WITH_PYTHON)
   cc_library(py_func_op SRCS py_func_op.cc DEPS op_registry python pybind)
 endif()
-if (WITH_ASCEND_CL)
-  cc_test(lookup_table_v2_op_npu_test SRCS lookup_table_v2_op_npu_test.cc DEPS op_registry lookup_table_v2_op scope device_context enforce executor compare_op)
-endif()
 if (WITH_ASCEND_CL)
     cc_test(range_op_npu_test SRCS range_op_npu_test.cc DEPS op_registry range_op scope device_context enforce executor)
+    cc_test(lookup_table_v2_op_npu_test SRCS lookup_table_v2_op_npu_test.cc DEPS op_registry lookup_table_v2_op scope device_context enforce executor compare_op)
+    cc_test(expand_op_npu_test SRCS expand_op_npu_test.cc DEPS op_registry expand_op scope device_context enforce executor compare_op)
 endif()
 
 set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library")
diff --git a/paddle/fluid/operators/expand_op.h b/paddle/fluid/operators/expand_op.h
index 8b79a1feb8ce1f7ee5b6237b4437ea67fa33ee11..9ea72cf0e94f10f00fdbb9f00f8b75239375487b 100644
--- a/paddle/fluid/operators/expand_op.h
+++ b/paddle/fluid/operators/expand_op.h
@@ -56,6 +56,12 @@ inline std::vector<int> get_expand_times(
     TensorCopySync(*expand_tensor, platform::CPUPlace(), &cpu_expand_tensor);
     expand_data = cpu_expand_tensor.data<int>();
   }
+#ifdef PADDLE_WITH_ASCEND_CL
+  if (platform::is_npu_place(expand_tensor->place())) {
+    TensorCopySync(*expand_tensor, platform::CPUPlace(), &cpu_expand_tensor);
+    expand_data = cpu_expand_tensor.data<int>();
+  }
+#endif
 #ifdef PADDLE_WITH_XPU
   if (platform::is_xpu_place(expand_tensor->place())) {
     TensorCopySync(*expand_tensor, platform::CPUPlace(), &cpu_expand_tensor);
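The new branch mirrors the CUDA and XPU fallbacks already present in get_expand_times(): the repeat counts must be readable on the host to size the output, but an NPU-resident ExpandTimes tensor cannot be dereferenced from host code, so it is staged through CPU memory first. Condensed, the function after this patch behaves roughly as sketched below; this is not the verbatim header — the expand_times_tensor list input and error handling are omitted, and the per-backend branches are merged into one:

    // Sketch of get_expand_times() after the patch (condensed, not verbatim).
    inline std::vector<int> get_expand_times(
        const framework::ExecutionContext& ctx) {
      if (ctx.HasInput("ExpandTimes")) {
        auto* expand_tensor = ctx.Input<framework::LoDTensor>("ExpandTimes");
        auto* expand_data = expand_tensor->data<int>();
        framework::Tensor cpu_expand_tensor;
        // GPU, NPU and XPU tensors all take the same host-staging path.
        if (!platform::is_cpu_place(expand_tensor->place())) {
          TensorCopySync(*expand_tensor, platform::CPUPlace(),
                         &cpu_expand_tensor);
          expand_data = cpu_expand_tensor.data<int>();
        }
        return std::vector<int>(expand_data,
                                expand_data + expand_tensor->numel());
      }
      // Otherwise the repeat counts come from the 'expand_times' attribute.
      return ctx.Attr<std::vector<int>>("expand_times");
    }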
diff --git a/paddle/fluid/operators/expand_op_npu.cc b/paddle/fluid/operators/expand_op_npu.cc
new file mode 100644
index 0000000000000000000000000000000000000000..1446637da6a77ced53e3aaa980012c2a6867cadb
--- /dev/null
+++ b/paddle/fluid/operators/expand_op_npu.cc
@@ -0,0 +1,82 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_ASCEND_CL
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/expand_op.h"
+#include "paddle/fluid/operators/npu_op_runner.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename DeviceContext, typename T>
+class ExpandNPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto rank = context.Input<framework::Tensor>("X")->dims().size();
+    PADDLE_ENFORCE_GE(
+        rank, 1,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'x' for Op(expand) "
+            "must be greater than or equal to 1, but the value received is %d.",
+            rank));
+    PADDLE_ENFORCE_LE(
+        rank, MAX_RANK_SUPPORTED,
+        platform::errors::InvalidArgument(
+            "The number of dimensions of the input 'x' for Op(expand) "
+            "must be less than or equal to %d, but the value received is %d.",
+            MAX_RANK_SUPPORTED, rank));
+    switch (rank) { REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED) }
+  }
+
+ protected:
+  template <int Rank>
+  void Expand(const framework::ExecutionContext& context) const {
+    auto* in0 = context.Input<framework::Tensor>("X");
+    auto in_dims = in0->dims();
+    auto expand_times = get_expand_times(context);
+    PADDLE_ENFORCE_EQ(
+        static_cast<size_t>(in_dims.size()), expand_times.size(),
+        platform::errors::InvalidArgument(
+            "The number of elements (%d) of 'expand_times' for "
+            "Op(expand) must be equal to the number "
+            "of dimensions (%d) of the input.",
+            expand_times.size(), static_cast<size_t>(in_dims.size())));
+    auto* out0 = context.Output<framework::Tensor>("Out");
+    framework::DDim out_dims(in_dims);
+    for (size_t i = 0; i < expand_times.size(); ++i) {
+      out_dims[i] *= expand_times[i];
+    }
+    out0->Resize(out_dims);
+    out0->mutable_data<T>(context.device_context().GetPlace());
+    auto runner =
+        NpuOpRunner("TileD", {*in0}, {*out0}, {{"multiples", expand_times}});
+    auto stream =
+        context.template device_context<paddle::platform::NPUDeviceContext>()
+            .stream();
+    runner.Run(stream);
+  }
+};
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OP_NPU_KERNEL(
+    expand, ops::ExpandNPUKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::ExpandNPUKernel<paddle::platform::NPUDeviceContext,
+                         paddle::platform::float16>);
+
+#endif
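The kernel computes the output shape on the host, then lowers the actual data movement to the CANN "TileD" operator, whose multiples attribute carries expand_times. The switch keeps the compile-time-rank dispatch used by the CPU/CUDA kernels, even though TileD itself does not need the rank as a template argument — which lets the NPU file reuse get_expand_times() and the rank checks from expand_op.h unchanged. REP_EXPAND_TEMPLATE is a Boost.Preprocessor repetition defined in expand_op.h; its effect is roughly:

    // Effective expansion of REP_EXPAND_TEMPLATE(MAX_RANK_SUPPORTED), where
    // MAX_RANK_SUPPORTED is 6 in expand_op.h (via BOOST_PP_REPEAT):
    switch (rank) {
      case 1: Expand<1>(context); break;
      case 2: Expand<2>(context); break;
      case 3: Expand<3>(context); break;
      case 4: Expand<4>(context); break;
      case 5: Expand<5>(context); break;
      case 6: Expand<6>(context); break;
    }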
diff --git a/paddle/fluid/operators/expand_op_npu_test.cc b/paddle/fluid/operators/expand_op_npu_test.cc
new file mode 100644
index 0000000000000000000000000000000000000000..95f7865a8a3a4ee22600e4a64c7f2e7bf0fa2a2c
--- /dev/null
+++ b/paddle/fluid/operators/expand_op_npu_test.cc
@@ -0,0 +1,74 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include <memory>
+#include <string>
+#include <thread>  // NOLINT
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/operators/math/math_function.h"
+#include "paddle/fluid/string/printf.h"
+
+namespace f = paddle::framework;
+namespace p = paddle::platform;
+namespace m = paddle::operators::math;
+
+USE_OP(expand);
+USE_OP_DEVICE_KERNEL(expand, NPU);
+
+template <typename T>
+void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
+  // init
+  auto in = scope->Var("X");
+  auto expand_times = scope->Var("ExpandTimes");
+  auto out = scope->Var("Out");
+  auto in_t = in->GetMutable<f::LoDTensor>();
+  auto out_t = out->GetMutable<f::LoDTensor>();
+  auto expand_times_t = expand_times->GetMutable<f::LoDTensor>();
+
+  auto place = ctx.GetPlace();
+  TensorFromVector(std::vector<T>(3 * 1 * 7, 1), ctx, in_t);
+  TensorFromVector(std::vector<int>({1, 10, 1}), ctx, expand_times_t);
+
+  in_t->Resize(f::make_ddim({3, 1, 7}));
+  expand_times_t->Resize(f::make_ddim({3}));
+  out_t->Resize(f::make_ddim({3, 10, 7}));
+  out_t->mutable_data<T>(place);
+
+  f::AttributeMap attrs = {};
+  auto op = f::OpRegistry::CreateOp(
+      "expand", {{"X", {"X"}}, {"ExpandTimes", {"ExpandTimes"}}},
+      {{"Out", {"Out"}}}, attrs);
+  op->Run(*scope, place);
+  ctx.Wait();
+
+  auto out_dim = out_t->dims();
+  EXPECT_EQ(out_dim.at(0), 3);
+  EXPECT_EQ(out_dim.at(1), 10);
+  EXPECT_EQ(out_dim.at(2), 7);
+}
+
+TEST(expand, NPU_fp32) {
+  f::Scope scope;
+  p::NPUDeviceContext ctx(p::NPUPlace(0));
+  Compare<float>(&scope, ctx);
+}
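Compare() currently asserts only on the output dims, not on the values. Since X is filled with ones and TileD merely repeats elements, a value check is a natural extension; a sketch that could be appended before the closing brace of Compare(), using the TensorToVector helper (the counterpart of the TensorFromVector calls above, from the same tensor_util.h):

    // Hedged extension: read Out back to the host and verify every element.
    // The input was all ones, so each tiled element must be exactly 1.
    std::vector<T> out_v;
    paddle::framework::TensorToVector(*out_t, ctx, &out_v);
    ctx.Wait();
    EXPECT_EQ(out_v.size(), static_cast<size_t>(3 * 10 * 7));
    for (auto v : out_v) {
      EXPECT_EQ(v, static_cast<T>(1));
    }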
diff --git a/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py
new file mode 100644
index 0000000000000000000000000000000000000000..6effb3c80aded317d79c99a034e236a2d6480487
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/npu/test_expand_op_npu.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import numpy as np
+import unittest
+import sys
+sys.path.append("..")
+from op_test import OpTest
+import paddle
+import paddle.fluid as fluid
+
+paddle.enable_static()
+SEED = 2021
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestExpand(OpTest):
+    def setUp(self):
+        self.set_npu()
+        self.op_type = "expand"
+        self.place = paddle.NPUPlace(0)
+
+        self.init_dtype()
+        np.random.seed(SEED)
+        x = np.random.randn(3, 1, 7).astype(self.dtype)
+        out = np.tile(x, [1, 10, 1])
+
+        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
+        self.attrs = {'expand_times': [1, 10, 1]}
+        self.outputs = {'Out': out}
+
+    def set_npu(self):
+        self.__class__.use_npu = True
+
+    def init_dtype(self):
+        self.dtype = np.float32
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place, check_dygraph=False)
+
+    # TODO(ascendrc): Add grad test
+    # def test_check_grad(self):
+    #     if self.dtype == np.float16:
+    #         return
+    #     self.check_grad(['X'], 'Out')
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestExpandV2(TestExpand):
+    def setUp(self):
+        self.set_npu()
+        self.op_type = "expand"
+        self.place = paddle.NPUPlace(0)
+
+        self.init_dtype()
+        np.random.seed(SEED)
+        x = np.random.randn(3, 1, 7).astype(self.dtype)
+        out = np.tile(x, [1, 10, 1])
+        expand_times = np.array([1, 10, 1]).astype(np.int32)
+
+        self.inputs = {
+            'X': OpTest.np_dtype_to_fluid_dtype(x),
+            'ExpandTimes': OpTest.np_dtype_to_fluid_dtype(expand_times)
+        }
+        self.attrs = {}
+        self.outputs = {'Out': out}
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestExpandFp16(TestExpand):
+    no_need_check_grad = True
+
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(not paddle.is_compiled_with_npu(),
+                 "core is not compiled with NPU")
+class TestExpandNet(unittest.TestCase):
+    def _test(self, run_npu=True):
+        main_prog = paddle.static.Program()
+        startup_prog = paddle.static.Program()
+        main_prog.random_seed = SEED
+        startup_prog.random_seed = SEED
+        np.random.seed(SEED)
+
+        a_np = np.random.random(size=(32, 1)).astype('float32')
+        label_np = np.random.randint(2, size=(32, 1)).astype('int64')
+
+        with paddle.static.program_guard(main_prog, startup_prog):
+            a = paddle.static.data(name="a", shape=[32, 1], dtype='float32')
+            label = paddle.static.data(
+                name="label", shape=[32, 1], dtype='int64')
+
+            res = paddle.fluid.layers.expand(a, [1, 32])
+            loss = res.sum()
+            sgd = fluid.optimizer.SGD(learning_rate=0.01)
+            sgd.minimize(loss)
+
+        if run_npu:
+            place = paddle.NPUPlace(0)
+        else:
+            place = paddle.CPUPlace()
+
+        exe = paddle.static.Executor(place)
+        exe.run(startup_prog)
+
+        for epoch in range(100):
+            loss_res = exe.run(
+                main_prog,
+                feed={"a": a_np,
+                      "label": label_np},
+                fetch_list=[loss])
+            if epoch % 10 == 0:
+                print("Epoch {} | Loss: {}".format(epoch, loss_res))
+
+        return loss_res
+
+    def test_npu(self):
+        cpu_loss = self._test(False)
+        npu_loss = self._test(True)
+
+        self.assertTrue(np.allclose(npu_loss, cpu_loss))
+
+
+if __name__ == '__main__':
+    unittest.main()