From 3523bbe86376878fcda52b2dcc152db76971db87 Mon Sep 17 00:00:00 2001
From: Qi Li
Date: Tue, 26 Oct 2021 13:56:18 +0800
Subject: [PATCH] [NPU] fix argsort op, test=develop (#36576)

* [NPU] fix argsort op, test=develop

* remove debug files, test=develop

* fix typo, test=develop

* address review comments, test=develop
---
 paddle/fluid/operators/arg_max_op_xpu.cc      |   2 +-
 paddle/fluid/operators/arg_min_op_npu.cc      |   2 +-
 paddle/fluid/operators/argsort_op_npu.cc      | 345 ++++++++----------
 paddle/fluid/operators/cumsum_op_npu.cc       |   2 +-
 paddle/fluid/operators/dropout_op_npu.cc      |   2 +-
 paddle/fluid/operators/expand_v2_op_npu.cc    |   2 +-
 paddle/fluid/operators/huber_loss_op_npu.cc   |   5 +-
 .../fluid/operators/interpolate_v2_op_npu.cc  |   2 +-
 paddle/fluid/operators/is_empty_op_npu.cc     |   2 +-
 paddle/fluid/operators/log_loss_op_npu.cc     |   2 +-
 paddle/fluid/operators/meshgrid_op_npu.cc     |   2 +-
 paddle/fluid/operators/pad3d_op_npu.cc        |   2 +-
 .../operators/reduce_ops/reduce_max_op_npu.cc |   2 +-
 .../reduce_ops/reduce_prod_op_npu.cc          |   2 +-
 ...igmoid_cross_entropy_with_logits_op_npu.cc |   2 +-
 paddle/fluid/operators/slice_op_npu.cc        |   2 +-
 paddle/fluid/operators/tril_triu_op_npu.cc    |   2 +-
 .../unittests/npu/test_argsort_op_npu.py      |   8 +-
 18 files changed, 171 insertions(+), 217 deletions(-)

diff --git a/paddle/fluid/operators/arg_max_op_xpu.cc b/paddle/fluid/operators/arg_max_op_xpu.cc
index 8060b5cf75..71ec26ea5a 100644
--- a/paddle/fluid/operators/arg_max_op_xpu.cc
+++ b/paddle/fluid/operators/arg_max_op_xpu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #ifdef PADDLE_WITH_XPU
 
diff --git a/paddle/fluid/operators/arg_min_op_npu.cc b/paddle/fluid/operators/arg_min_op_npu.cc
index f776412c16..cc81e32008 100644
--- a/paddle/fluid/operators/arg_min_op_npu.cc
+++ b/paddle/fluid/operators/arg_min_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/arg_min_max_op_base.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/argsort_op_npu.cc b/paddle/fluid/operators/argsort_op_npu.cc
index e36dd322e0..f2a57b4b9b 100644
--- a/paddle/fluid/operators/argsort_op_npu.cc
+++ b/paddle/fluid/operators/argsort_op_npu.cc
@@ -1,8 +1,11 @@
 /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
+
 http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -15,156 +18,142 @@ limitations under the License.
 */
 
 namespace paddle {
 namespace operators {
 
-template <typename DeviceContext, typename T>
+using Tensor = framework::Tensor;
+using NPUDeviceContext = platform::NPUDeviceContext;
+
+template <typename T>
+static void TranposeNPU(const framework::ExecutionContext& ctx,
+                        const aclrtStream& stream, std::vector<int64_t>* perm,
+                        const Tensor& in, Tensor* out) {
+  out->mutable_data<T>(ctx.GetPlace());
+  NpuOpRunner runner;
+  runner.SetType("Transpose")
+      .AddInput(in)
+      .AddInput(std::move(*perm))
+      .AddOutput(*out)
+      .Run(stream);
+}
+
+static void CastToInt64(const framework::ExecutionContext& ctx,
+                        const aclrtStream& stream, const Tensor& in,
+                        Tensor* out) {
+  out->mutable_data<int64_t>(ctx.GetPlace());
+  NpuOpRunner runner;
+  runner.SetType("Cast")
+      .AddInput(in)
+      .AddOutput(*out)
+      .AddAttr("dst_type", ACL_INT64)
+      .Run(stream);
+}
+
+template <typename T>
 class ArgsortNPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     auto* input = ctx.Input<framework::Tensor>("X");
     auto* output = ctx.Output<framework::Tensor>("Out");
-    output->mutable_data<T>(ctx.GetPlace());
     auto* indices = ctx.Output<framework::Tensor>("Indices");
-    indices->mutable_data<int32_t>(ctx.GetPlace());
+    int axis = ctx.Attr<int>("axis");
+    bool descending = ctx.Attr<bool>("descending");
 
-    int32_t axis = ctx.Attr<int32_t>("axis");
-    auto in_dims = indices->dims();
+    auto in_dims = input->dims();
     axis = (axis < 0) ? (in_dims.size() + axis) : axis;
-    bool descending = ctx.Attr<bool>("descending");
-    auto stream =
-        ctx.template device_context<paddle::platform::NPUDeviceContext>()
-            .stream();
-    framework::NPUAttributeMap sort_attr_input = {
-        {"axis", static_cast<int32_t>(-1)}, {"descending", descending}};
+
+    auto stream = ctx.template device_context<NPUDeviceContext>().stream();
+    framework::NPUAttributeMap attr = {{"axis", -1},
+                                       {"descending", descending}};
+
+    Tensor indices_tmp(framework::proto::VarType::INT32);
+    indices_tmp.Resize(indices->dims());
 
     if (axis == -1 || axis + 1 == in_dims.size()) {
-      const auto& sort_runner =
-          NpuOpRunner("Sort", {*input}, {*output, *indices}, sort_attr_input);
-      sort_runner.Run(stream);
+      output->mutable_data<T>(ctx.GetPlace());
+      indices_tmp.mutable_data<int32_t>(ctx.GetPlace());
+      const auto& runner =
+          NpuOpRunner("Sort", {*input}, {*output, indices_tmp}, attr);
+      runner.Run(stream);
     } else {
-      // transpose
-      std::vector<int> trans;
-      for (int i = 0; i < axis; i++) {
-        trans.push_back(i);
-      }
-      trans.push_back(in_dims.size() - 1);
-      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
-        trans.push_back(i);
+      std::vector<int64_t> perm;
+      for (int64_t i = 0; i < in_dims.size(); i++) {
+        perm.emplace_back(i);
       }
-      trans.push_back(axis);
-      framework::DDim trans_dims(in_dims);
-      for (size_t i = 0; i < trans.size(); i++) {
-        trans_dims[i] = in_dims[trans[i]];
+      std::swap(perm[axis], perm[in_dims.size() - 1]);
+
+      std::vector<int64_t> shape;
+      for (size_t i = 0; i < perm.size(); i++) {
+        shape.emplace_back(in_dims[perm[i]]);
       }
-      framework::NPUAttributeMap trans_attr_input = {{"perm", trans}};
-      Tensor trans_input;
-      trans_input.mutable_data<T>(trans_dims, ctx.GetPlace());
-      const auto& trans_input_runner =
-          NpuOpRunner("TransposeD", {*input}, {trans_input}, trans_attr_input);
-      trans_input_runner.Run(stream);
-      Tensor trans_indices;
-      trans_indices.mutable_data<int32_t>(trans_dims, ctx.GetPlace());
-      const auto& trans_indice_runner = NpuOpRunner(
-          "TransposeD", {*indices}, {trans_indices}, trans_attr_input);
-      trans_indice_runner.Run(stream);
-      Tensor trans_output;
+      auto trans_dims = framework::make_ddim(shape);
+
+      Tensor trans_input(input->type());
+      trans_input.Resize(trans_dims);
+      TranposeNPU<T>(ctx, stream, &perm, *input, &trans_input);
+
+      Tensor trans_output(input->type());
+      Tensor trans_indices(framework::proto::VarType::INT32);
       trans_output.mutable_data<T>(trans_dims, ctx.GetPlace());
-      const auto& trans_output_runner = NpuOpRunner(
-          "TransposeD", {*output}, {trans_output}, trans_attr_input);
-      trans_output_runner.Run(stream);
-      const auto& sort_runner =
-          NpuOpRunner("Sort", {trans_input}, {trans_output, trans_indices},
-                      sort_attr_input);
-      sort_runner.Run(stream);
-      // transpose back
-      const auto& trans_indices_back_runner = NpuOpRunner(
-          "TransposeD", {trans_indices}, {*indices}, trans_attr_input);
-      trans_indices_back_runner.Run(stream);
-      const auto& trans_output_back_runner = NpuOpRunner(
-          "TransposeD", {trans_output}, {*output}, trans_attr_input);
-      trans_output_back_runner.Run(stream);
+      trans_indices.mutable_data<int32_t>(trans_dims, ctx.GetPlace());
+
+      const auto& runner = NpuOpRunner("Sort", {trans_input},
+                                       {trans_output, trans_indices}, attr);
+      runner.Run(stream);
+
+      TranposeNPU<T>(ctx, stream, &perm, trans_output, output);
+      TranposeNPU<int32_t>(ctx, stream, &perm, trans_indices, &indices_tmp);
     }
+    CastToInt64(ctx, stream, indices_tmp, indices);
   }
 };
 
-template <typename T>
-static void ReshapeNPU(const framework::Tensor* input,
-                       const std::vector<T>& input_shapes,
-                       framework::Tensor* output) {
-  output->ShareDataWith(*input);
-  output->Resize(framework::make_ddim(std::move(input_shapes)));
-}
-
 template <typename T, typename Type>
 static void FullAssignNPU(const framework::ExecutionContext& ctx,
-                          Type ind_lastdim, Type outer_dim,
-                          const framework::DDim& trans_dims,
-                          const framework::Tensor* input,
-                          const framework::Tensor* indices,
-                          framework::Tensor* t_out) {
-  // reshape input
-  Type input_shape = ind_lastdim * outer_dim;
-  std::vector<Type> input_shapes = {input_shape};
-  Tensor input_reshape_tensor(input->type());
-  ReshapeNPU<Type>(input, input_shapes, &input_reshape_tensor);
-  // reshape index
-  std::vector<Type> index_shapes = {outer_dim, ind_lastdim};
-  framework::DDim ind_2d = framework::make_ddim({outer_dim, ind_lastdim});
-  Tensor ind_2d_tensor(indices->type());
-  ReshapeNPU<Type>(indices, index_shapes, &ind_2d_tensor);
-  // range_flatten_index
-  std::vector<int32_t> range_flatten_index;
-  for (Type i = 0; i < input_shape; i += ind_lastdim) {
-    range_flatten_index.push_back(static_cast<int32_t>(i));
+                          const aclrtStream& stream,
+                          const framework::DDim in_dims, const Tensor& input,
+                          const Tensor& indices, Tensor* t_out) {
+  const int64_t input_height =
+      framework::product(framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
+  const int64_t input_width = in_dims[in_dims.size() - 1];
+
+  Tensor input_tmp;
+  input_tmp.ShareDataWith(input);
+  input_tmp.Resize(
+      framework::make_ddim(std::vector<int64_t>{input_height * input_width}));
+
+  Tensor indices_tmp;
+  indices_tmp.ShareDataWith(indices);
+  indices_tmp.Resize(
+      framework::make_ddim(std::vector<int64_t>{input_height, input_width}));
+
+  std::vector<int64_t> indexs_value;
+  for (Type i = 0; i < input_height; i++) {
+    indexs_value.push_back(i * input_width);
   }
-  Tensor range_flatten_index_tensor(framework::proto::VarType::INT32);
-  range_flatten_index_tensor.Resize(framework::make_ddim({outer_dim}));
-  range_flatten_index_tensor.mutable_data<int32_t>(
-      {static_cast<int>(range_flatten_index.size())}, ctx.GetPlace());
-  TensorFromVector(range_flatten_index, ctx.device_context(),
-                   &range_flatten_index_tensor);
-  Tensor range_flatten_index_expand_tensor(range_flatten_index_tensor.type());
-  std::vector<Type> flatten_shape = {outer_dim, 1};
-  ReshapeNPU<Type>(&range_flatten_index_tensor, flatten_shape,
-                   &range_flatten_index_expand_tensor);
-  auto stream =
-      ctx.template device_context<paddle::platform::NPUDeviceContext>()
-          .stream();
-  Tensor ind_2d_add_tensor;
-  ind_2d_add_tensor.mutable_data<int32_t>(ind_2d, ctx.GetPlace());
-  const auto& runner_ind_2d_tensor = NpuOpRunner(
-      std::string("Add"), {ind_2d_tensor, range_flatten_index_expand_tensor},
-      {ind_2d_add_tensor}, {});
-  runner_ind_2d_tensor.Run(stream);
-  Tensor ind_reshape_tensor(ind_2d_add_tensor.type());
-  ReshapeNPU<Type>(&ind_2d_add_tensor, input_shapes, &ind_reshape_tensor);
-  Tensor ind_reshape_expand_tensor(ind_reshape_tensor.type());
-  std::vector<Type> ind_shape = {input_shape, 1};
-  ReshapeNPU<Type>(&ind_reshape_tensor, ind_shape, &ind_reshape_expand_tensor);
-  // expand_index
-  Tensor input_scatter_tensor;
-  input_scatter_tensor.Resize({input_shape});
-  input_scatter_tensor.mutable_data<T>(ctx.GetPlace());
-  Tensor input_scatter_tensor_ori;
-  input_scatter_tensor_ori.Resize({input_shape});
-  input_scatter_tensor_ori.mutable_data<T>(ctx.GetPlace());
-  std::vector<Type> trans_shapes;
-
-  for (int i = 0; i < trans_dims.size(); i++) {
-    trans_shapes.push_back(trans_dims[i]);
-  }
-  NpuOpRunner runner_scatter;
-  runner_scatter.SetType("TensorScatterUpdate")
-      .AddInput(input_scatter_tensor_ori)
-      .AddInput(ind_reshape_expand_tensor)
-      .AddInput(input_reshape_tensor)
-      .AddOutput(input_scatter_tensor);
-  runner_scatter.Run(stream);
-  framework::TensorCopy(input_scatter_tensor, ctx.GetPlace(),
-                        ctx.template device_context<paddle::platform::NPUDeviceContext>(),
-                        t_out);
-  t_out->Resize(framework::make_ddim(trans_shapes));
+  Tensor indexs_tmp(indices.type());
+  framework::TensorFromVector(indexs_value, ctx.device_context(),
+                              &indexs_tmp);
+  indexs_tmp.Resize(
+      framework::make_ddim(std::vector<int64_t>{input_height, 1}));
+
+  Tensor indices_index(indices.type());
+  indices_index.mutable_data<int64_t>(indexs_tmp.dims(), ctx.GetPlace());
+  const auto& runner_add =
+      NpuOpRunner("Add", {indices_tmp, indexs_tmp}, {indices_index}, {});
+  runner_add.Run(stream);
+
+  indices_index.Resize(
+      framework::make_ddim(std::vector<int64_t>{input_height * input_width}));
+
+  t_out->mutable_data<T>(ctx.GetPlace());
+  Tensor out_tmp(t_out->type());
+  out_tmp.ShareDataWith(*t_out);
+
+  const auto& runner =
+      NpuOpRunner("TensorScatterUpdate", {input_tmp, indices_index, input_tmp},
+                  {out_tmp}, {});
+  runner.Run(stream);
 }
 
-template <typename DeviceContext, typename T>
+template <typename T>
 class ArgsortGradNPUKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
@@ -172,75 +161,42 @@ class ArgsortGradNPUKernel : public framework::OpKernel<T> {
     auto* dX = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
     auto* dO = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
     int axis = ctx.Attr<int>("axis");
+
     auto in_dims = indices->dims();
     axis = (axis < 0) ? (in_dims.size() + axis) : axis;
-    auto place = ctx.GetPlace();
-
-    auto stream =
-        ctx.template device_context<paddle::platform::NPUDeviceContext>()
-            .stream();
-    dX->mutable_data<T>(ctx.GetPlace());
-    Tensor dxt;
-    dxt.mutable_data<T>(dX->dims(), place);
-    const auto& runner_flatten =
-        NpuOpRunner(std::string("Flatten"), {*dX}, {dxt}, {});
-    runner_flatten.Run(stream);
-    FillNpuTensorWithConstant<T>(&dxt, static_cast<T>(0));
     if (dO->numel() == 0) return;
-    // Do full assign
-    if (axis == -1 || axis + 1 == in_dims.size()) {
-      const int64_t outer_dim = framework::product(
-          framework::slice_ddim(in_dims, 0, in_dims.size() - 1));
-      const int64_t ind_lastdim = in_dims[in_dims.size() - 1];
-      FullAssignNPU<T, int64_t>(ctx, ind_lastdim, outer_dim, in_dims, dO,
-                                indices, dX);
+
+    auto stream = ctx.template device_context<NPUDeviceContext>().stream();
+
+    if (axis == -1 || axis + 1 == in_dims.size()) {
+      FullAssignNPU<T, int64_t>(ctx, stream, in_dims, *dO, *indices, dX);
     } else {
-      // If not full assign do transpose
-      std::vector<int> trans;
-      for (int i = 0; i < axis; i++) {
-        trans.push_back(i);
-      }
-      trans.push_back(in_dims.size() - 1);
-      for (int i = axis + 1; i < in_dims.size() - 1; i++) {
-        trans.push_back(i);
+      std::vector<int64_t> perm;
+      for (int64_t i = 0; i < in_dims.size(); i++) {
+        perm.emplace_back(i);
       }
-      trans.push_back(axis);
-      framework::DDim trans_dims(in_dims);
-      for (size_t i = 0; i < trans.size(); i++) {
-        trans_dims[i] = in_dims[trans[i]];
-      }
-      std::vector<int> axis;
-      for (size_t i = 0; i < trans.size(); i++) {
-        axis.push_back(in_dims[trans[i]]);
+      std::swap(perm[axis], perm[in_dims.size() - 1]);
+
+      std::vector<int64_t> shape;
+      for (size_t i = 0; i < perm.size(); i++) {
+        shape.emplace_back(in_dims[perm[i]]);
       }
-      framework::NPUAttributeMap attr_input = {{"perm", trans}};
-      Tensor trans_dO;
-      trans_dO.mutable_data<T>(trans_dims, ctx.GetPlace());
-      Tensor trans_ind;
-      trans_ind.mutable_data<int32_t>(trans_dims, ctx.GetPlace());
-      // Do transpose
-      const auto& runner_transpose_dx = NpuOpRunner(
-          std::string("TransposeD"), {*dO}, {trans_dO}, {attr_input});
-      runner_transpose_dx.Run(stream);
-      const auto& runner_transpose_ind = NpuOpRunner(
-          std::string("TransposeD"), {*indices}, {trans_ind}, {attr_input});
-      runner_transpose_ind.Run(stream);
-
-      const int64_t outer_dim = framework::product(
-          framework::slice_ddim(trans_dims, 0, trans_dims.size() - 1));
-      const int64_t ind_lastdim = trans_dims[trans_dims.size() - 1];
-
-      Tensor tmp_out;
-      tmp_out.mutable_data<T>(trans_dims, ctx.GetPlace());
-
-      FullAssignNPU<T, int64_t>(ctx, ind_lastdim, outer_dim, trans_dims,
-                                &trans_dO, &trans_ind, &tmp_out);
-
-      // transpose back
-      const auto& runner_transpose_out = NpuOpRunner(
-          std::string("TransposeD"), {tmp_out}, {*dX}, {attr_input});
-      runner_transpose_out.Run(stream);
+      auto trans_dims = framework::make_ddim(shape);
+
+      Tensor trans_dout(dO->type());
+      Tensor trans_ids(indices->type());
+      trans_dout.Resize(trans_dims);
+      trans_ids.Resize(trans_dims);
+
+      TranposeNPU<T>(ctx, stream, &perm, *dO, &trans_dout);
+      TranposeNPU<int64_t>(ctx, stream, &perm, *indices, &trans_ids);
+
+      Tensor trans_dx(dO->type());
+      trans_dx.Resize(trans_dims);
+      FullAssignNPU<T, int64_t>(ctx, stream, trans_dims, trans_dout, trans_ids,
+                                &trans_dx);
+
+      TranposeNPU<T>(ctx, stream, &perm, trans_dx, dX);
     }
   }
 };
 
@@ -251,11 +207,8 @@ class ArgsortGradNPUKernel : public framework::OpKernel<T> {
 namespace ops = paddle::operators;
 namespace plat = paddle::platform;
 
-REGISTER_OP_NPU_KERNEL(
-    argsort, ops::ArgsortNPUKernel<plat::NPUDeviceContext, float>,
-    ops::ArgsortNPUKernel<plat::NPUDeviceContext, plat::float16>);
+REGISTER_OP_NPU_KERNEL(argsort, ops::ArgsortNPUKernel<float>,
+                       ops::ArgsortNPUKernel<plat::float16>);
 
-REGISTER_OP_NPU_KERNEL(argsort_grad,
-                       ops::ArgsortGradNPUKernel<plat::NPUDeviceContext, float>,
-                       ops::ArgsortGradNPUKernel<plat::NPUDeviceContext, plat::float16>);
+REGISTER_OP_NPU_KERNEL(argsort_grad, ops::ArgsortGradNPUKernel<float>,
+                       ops::ArgsortGradNPUKernel<plat::float16>);
diff --git a/paddle/fluid/operators/cumsum_op_npu.cc b/paddle/fluid/operators/cumsum_op_npu.cc
index 486e85b0f0..0c0eb1577e 100644
--- a/paddle/fluid/operators/cumsum_op_npu.cc
+++ b/paddle/fluid/operators/cumsum_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/framework/tensor.h"
 #include "paddle/fluid/operators/cum_op.h"
diff --git a/paddle/fluid/operators/dropout_op_npu.cc b/paddle/fluid/operators/dropout_op_npu.cc
index b5c8bfff0d..50d247d9c0 100644
--- a/paddle/fluid/operators/dropout_op_npu.cc
+++ b/paddle/fluid/operators/dropout_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include <memory>
 #include <string>
diff --git a/paddle/fluid/operators/expand_v2_op_npu.cc b/paddle/fluid/operators/expand_v2_op_npu.cc
index 85fe86a9e6..4b0e077057 100644
--- a/paddle/fluid/operators/expand_v2_op_npu.cc
+++ b/paddle/fluid/operators/expand_v2_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/expand_v2_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/huber_loss_op_npu.cc b/paddle/fluid/operators/huber_loss_op_npu.cc
index a942615594..33cbaec4df 100644
--- a/paddle/fluid/operators/huber_loss_op_npu.cc
+++ b/paddle/fluid/operators/huber_loss_op_npu.cc
@@ -1,13 +1,16 @@
 /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
+
 http://www.apache.org/licenses/LICENSE-2.0
+
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/huber_loss_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/interpolate_v2_op_npu.cc b/paddle/fluid/operators/interpolate_v2_op_npu.cc
index d893fbd019..b30c7ac810 100644
--- a/paddle/fluid/operators/interpolate_v2_op_npu.cc
+++ b/paddle/fluid/operators/interpolate_v2_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/interpolate_v2_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/is_empty_op_npu.cc b/paddle/fluid/operators/is_empty_op_npu.cc
index 9155afecd0..01579abd74 100644
--- a/paddle/fluid/operators/is_empty_op_npu.cc
+++ b/paddle/fluid/operators/is_empty_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/is_empty_op.h"
 
diff --git a/paddle/fluid/operators/log_loss_op_npu.cc b/paddle/fluid/operators/log_loss_op_npu.cc
index a8d906d4b5..74b44165dc 100644
--- a/paddle/fluid/operators/log_loss_op_npu.cc
+++ b/paddle/fluid/operators/log_loss_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/log_loss_op.h"
 #include <cmath>
diff --git a/paddle/fluid/operators/meshgrid_op_npu.cc b/paddle/fluid/operators/meshgrid_op_npu.cc
index 9605fa092f..f22e2e178e 100644
--- a/paddle/fluid/operators/meshgrid_op_npu.cc
+++ b/paddle/fluid/operators/meshgrid_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/meshgrid_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/pad3d_op_npu.cc b/paddle/fluid/operators/pad3d_op_npu.cc
index 3a1fba9455..483c895e0e 100644
--- a/paddle/fluid/operators/pad3d_op_npu.cc
+++ b/paddle/fluid/operators/pad3d_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
diff --git a/paddle/fluid/operators/reduce_ops/reduce_max_op_npu.cc b/paddle/fluid/operators/reduce_ops/reduce_max_op_npu.cc
index b343fc88d7..5efc7e9b86 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_max_op_npu.cc
+++ b/paddle/fluid/operators/reduce_ops/reduce_max_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/npu_op_runner.h"
 #include "paddle/fluid/operators/reduce_ops/reduce_min_max_op.h"
diff --git a/paddle/fluid/operators/reduce_ops/reduce_prod_op_npu.cc b/paddle/fluid/operators/reduce_ops/reduce_prod_op_npu.cc
index 834b63f199..b5f571c7fe 100644
--- a/paddle/fluid/operators/reduce_ops/reduce_prod_op_npu.cc
+++ b/paddle/fluid/operators/reduce_ops/reduce_prod_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/reduce_ops/reduce_prod_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op_npu.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op_npu.cc
index 6f3b40dbbf..400a09330a 100644
--- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op_npu.cc
+++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/slice_op_npu.cc b/paddle/fluid/operators/slice_op_npu.cc
index 52351a98bc..a9092d7e2a 100644
--- a/paddle/fluid/operators/slice_op_npu.cc
+++ b/paddle/fluid/operators/slice_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/slice_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/paddle/fluid/operators/tril_triu_op_npu.cc b/paddle/fluid/operators/tril_triu_op_npu.cc
index cdabc28255..6e7e039113 100644
--- a/paddle/fluid/operators/tril_triu_op_npu.cc
+++ b/paddle/fluid/operators/tril_triu_op_npu.cc
@@ -10,7 +10,7 @@ Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
-limitations under the Licnse. */
+limitations under the License. */
 
 #include "paddle/fluid/operators/tril_triu_op.h"
 #include "paddle/fluid/operators/npu_op_runner.h"
diff --git a/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py
index 824266578b..2589b2a316 100644
--- a/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py
+++ b/python/paddle/fluid/tests/unittests/npu/test_argsort_op_npu.py
@@ -18,7 +18,7 @@ import numpy as np
 import unittest
 import sys
 sys.path.append("..")
-from op_test import OpTest, _set_use_system_allocator
+from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
@@ -63,9 +63,6 @@ class TestArgsortOp(OpTest):
         self.__class__.use_npu = True
         self.__class__.no_need_check_grad = True
 
-    def init_kernel_type(self):
-        self.use_mkldnn = False
-
     def init_inputshape(self):
         self.input_shape = (2, 2, 2, 3, 3)
 
@@ -158,7 +155,8 @@ class TestArgsortOpAxis0NPUFP32(TestArgsortOp):
         self.__class__.use_npu = True
 
     def test_check_grad(self):
-        self.check_grad_with_place(self.place, ["X"], "Out")
+        self.check_grad_with_place(
+            self.place, ["X"], "Out", max_relative_error=0.03)
 
 
 class TestArgsortOpAxis1NPUFP32(TestArgsortOpAxis0NPUFP32):
-- 
GitLab
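
Reviewer note (illustration only, not part of the patch): the rewritten kernels rely on one trick in two places. The NPU "Sort" op only orders the last dimension, so sorting along an arbitrary axis is done by swapping that axis with the last one via "Transpose", sorting, and swapping back with the same permutation (a transposition is its own inverse). The backward pass flattens dOut and the saved indices to a (height, width) view, offsets each row's indices by row * width, and scatters with "TensorScatterUpdate"; the forward pass additionally casts indices to INT64, since Sort emits INT32 while the argsort op declares INT64 indices. A minimal NumPy sketch of the same logic, with illustrative names that do not appear in the patch:

import numpy as np

def argsort_via_last_axis(x, axis, descending=False):
    # Swap `axis` with the last axis, sort along the last axis, then
    # transpose back with the same perm (it undoes the swap).
    perm = list(range(x.ndim))
    perm[axis], perm[-1] = perm[-1], perm[axis]
    xt = np.transpose(x, perm)
    ids = np.argsort(-xt if descending else xt, axis=-1)
    out = np.take_along_axis(xt, ids, axis=-1)
    return np.transpose(out, perm), np.transpose(ids, perm)

def argsort_grad(indices, dout):
    # Scatter dout back to input order: the flattened source position of
    # out[row, r] is row * width + indices[row, r], as in FullAssignNPU.
    height = int(np.prod(indices.shape[:-1]))
    width = indices.shape[-1]
    flat_ids = indices.reshape(height, width) + np.arange(height)[:, None] * width
    dx = np.empty(height * width, dtype=dout.dtype)
    dx[flat_ids.ravel()] = dout.ravel()
    return dx.reshape(indices.shape)

x = np.random.rand(2, 3, 4).astype(np.float32)
out, ids = argsort_via_last_axis(x, axis=1)
assert np.array_equal(ids, np.argsort(x, axis=1))
assert np.allclose(out, np.sort(x, axis=1))

g = argsort_grad(np.array([[1, 2, 0]]), np.array([[10., 20., 30.]]))
assert np.array_equal(g, np.array([[30., 10., 20.]]))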