From 04b2c9fa82757e175e9924b1109b0daf80ce4a25 Mon Sep 17 00:00:00 2001
From: huzhiqiang <912790387@qq.com>
Date: Wed, 12 Aug 2020 16:01:07 +0800
Subject: [PATCH] [Op] Add one_hot op for host backend (#4093)

---
 lite/kernels/host/CMakeLists.txt          |  4 +-
 lite/kernels/host/one_hot_compute.cc      | 90 +++++++++++++++++++++
 lite/kernels/host/one_hot_compute.h       | 38 ++++++++++
 lite/kernels/host/one_hot_compute_test.cc | 88 ++++++++++++++++++++++
 lite/operators/CMakeLists.txt             |  3 +-
 lite/operators/one_hot_op.cc              | 64 ++++++++++++++++
 lite/operators/one_hot_op.h               | 71 ++++++++++++++++++
 lite/operators/one_hot_op_test.cc         | 59 +++++++++++++++
 lite/operators/op_params.h                |  9 +++
 9 files changed, 424 insertions(+), 2 deletions(-)
 create mode 100644 lite/kernels/host/one_hot_compute.cc
 create mode 100644 lite/kernels/host/one_hot_compute.h
 create mode 100644 lite/kernels/host/one_hot_compute_test.cc
 create mode 100644 lite/operators/one_hot_op.cc
 create mode 100644 lite/operators/one_hot_op.h
 create mode 100644 lite/operators/one_hot_op_test.cc

diff --git a/lite/kernels/host/CMakeLists.txt b/lite/kernels/host/CMakeLists.txt
index 4b082a92e9..1c0914de73 100644
--- a/lite/kernels/host/CMakeLists.txt
+++ b/lite/kernels/host/CMakeLists.txt
@@ -23,7 +23,9 @@ add_kernel(print_compute_host Host extra SRCS print_compute.cc DEPS ${lite_kerne
 add_kernel(while_compute_host Host extra SRCS while_compute.cc DEPS ${lite_kernel_deps} program)
 add_kernel(conditional_block_compute_host Host extra SRCS conditional_block_compute.cc DEPS ${lite_kernel_deps} program)
 add_kernel(activation_grad_compute_host Host train SRCS activation_grad_compute.cc DEPS ${lite_kernel_deps})
+add_kernel(one_hot_compute_host Host extra SRCS one_hot_compute.cc DEPS ${lite_kernel_deps})
 
-if(LITE_BUILD_EXTRA)
+if(LITE_BUILD_EXTRA AND LITE_WITH_X86)
   lite_cc_test(test_where_index_compute_host SRCS where_index_compute.cc DEPS where_index_compute_host)
+  lite_cc_test(test_one_hot_compute_host SRCS one_hot_compute_test.cc DEPS one_hot_compute_host)
 endif()
diff --git a/lite/kernels/host/one_hot_compute.cc b/lite/kernels/host/one_hot_compute.cc
new file mode 100644
index 0000000000..6880de39ae
--- /dev/null
+++ b/lite/kernels/host/one_hot_compute.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "lite/kernels/host/one_hot_compute.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +template +void OneHotKernelFunctor(const Tensor* in, + Tensor* out, + int depth, + bool allow_out_of_range = false) { + auto* p_in_data = in->data(); + auto numel = in->numel(); + auto* p_out_data = out->mutable_data(); + memset(p_out_data, 0, out->numel() * sizeof(T)); + if (allow_out_of_range) { + for (int i = 0; i < numel; ++i) { + if (p_in_data[i] >= 0 && p_in_data[i] < depth) { + p_out_data[i * depth + static_cast(p_in_data[i])] = 1.0; + } + } + } else { + for (int i = 0; i < numel; ++i) { + CHECK_GE(p_in_data[i], 0) << "Illegal index value, Input(input) value " + "should be at least 0, but received input (" + << p_in_data[i] << ") less than 0"; + CHECK_LE(p_in_data[i], depth) + << "Illegal index value, Input(input) value should be less than " + "Input(depth), but received input (" + << p_in_data[i] << ") not less than depth (" << depth << ")"; + p_out_data[i * depth + static_cast(p_in_data[i])] = 1.0; + } + } +} + +void OneHotCompute::Run() { + auto& param = this->template Param(); + switch (param.dtype) { + case static_cast(lite::core::FluidType::INT64): + OneHotKernelFunctor( + param.X, param.Out, param.depth, param.allow_out_of_range); + break; + case static_cast(lite::core::FluidType::INT32): + OneHotKernelFunctor( + param.X, param.Out, param.depth, param.allow_out_of_range); + break; + case static_cast(lite::core::FluidType::FP32): + OneHotKernelFunctor( + param.X, param.Out, param.depth, param.allow_out_of_range); + break; + default: + LOG(ERROR) << "Unsupported data type for one_hot op:" << param.dtype; + } +} + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle + +REGISTER_LITE_KERNEL( + one_hot, kHost, kAny, kAny, paddle::lite::kernels::host::OneHotCompute, def) + .BindInput("X", + {LiteType::GetTensorTy(TARGET(kHost), + PRECISION(kAny), + DATALAYOUT(kAny))}) + .BindInput("depth_tensor", + {LiteType::GetTensorTy(TARGET(kHost), + PRECISION(kAny), + DATALAYOUT(kAny))}) + .BindOutput("Out", + {LiteType::GetTensorTy(TARGET(kHost), + PRECISION(kAny), + DATALAYOUT(kAny))}) + .Finalize(); diff --git a/lite/kernels/host/one_hot_compute.h b/lite/kernels/host/one_hot_compute.h new file mode 100644 index 0000000000..6c94d900a7 --- /dev/null +++ b/lite/kernels/host/one_hot_compute.h @@ -0,0 +1,38 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+#pragma once
+#include <algorithm>
+#include "lite/core/kernel.h"
+#include "lite/core/op_registry.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace host {
+
+class OneHotCompute
+    : public KernelLite<TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)> {
+ public:
+  using param_t = operators::OneHotParam;
+
+  void Run() override;
+
+  virtual ~OneHotCompute() = default;
+};
+
+}  // namespace host
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
diff --git a/lite/kernels/host/one_hot_compute_test.cc b/lite/kernels/host/one_hot_compute_test.cc
new file mode 100644
index 0000000000..d767b35851
--- /dev/null
+++ b/lite/kernels/host/one_hot_compute_test.cc
@@ -0,0 +1,88 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+
+#include <algorithm>
+#include <iostream>
+#include <memory>
+#include <random>
+#include <string>
+#include <vector>
+
+#include "lite/core/op_registry.h"
+#include "lite/kernels/host/one_hot_compute.h"
+
+namespace paddle {
+namespace lite {
+namespace kernels {
+namespace host {
+
+/* note:
+One Hot Operator. This operator creates the one-hot representation of the
+input index values. The following example helps to explain what this
+operator does:
+X is a LoDTensor:
+  X.lod = [[0, 1, 4]]
+  X.shape = [4, 1]
+  X.data = [[1], [1], [3], [0]]
+set depth = 4
+Out is a LoDTensor:
+  Out.lod = [[0, 1, 4]]
+  Out.shape = [4, 4]
+  Out.data = [[0., 1., 0., 0.],
+              [0., 1., 0., 0.],
+              [0., 0., 0., 1.],
+              [1., 0., 0., 0.]] */
+TEST(one_hot, test) {
+  using T = float;
+
+  lite::Tensor x, out;
+  x.Resize({4, 1});
+  out.Resize({4, 4});
+
+  auto* x_data = x.mutable_data<T>();
+  x_data[0] = 1;
+  x_data[1] = 1;
+  x_data[2] = 3;
+  x_data[3] = 0;
+  auto* out_data = out.mutable_data<T>();
+  float out_ref[4][4] = {
+      {0, 1, 0, 0}, {0, 1, 0, 0}, {0, 0, 0, 1}, {1, 0, 0, 0}};
+
+  OneHotCompute one_hot;
+  operators::OneHotParam param;
+
+  param.X = &x;
+  param.Out = &out;
+  param.depth = 4;
+  // static_cast<int>(lite::core::FluidType::FP32) == 5
+  param.dtype = 5;
+
+  one_hot.SetParam(param);
+  one_hot.PrepareForRun();
+
+  one_hot.Run();
+
+  for (int i = 0; i < out.numel(); i++) {
+    EXPECT_NEAR(out_data[i], out_ref[i / 4][i % 4], 1e-5);
+  }
+}
+
+}  // namespace host
+}  // namespace kernels
+}  // namespace lite
+}  // namespace paddle
+
+USE_LITE_KERNEL(one_hot, kHost, kAny, kAny, def);
diff --git a/lite/operators/CMakeLists.txt b/lite/operators/CMakeLists.txt
index c8d8f2133b..f84ce5cf67 100644
--- a/lite/operators/CMakeLists.txt
+++ b/lite/operators/CMakeLists.txt
@@ -147,6 +147,7 @@ add_operator(layer_norm_op extra SRCS layer_norm_op.cc DEPS ${op_DEPS})
 add_operator(sequence_softmax_op extra SRCS sequence_softmax_op.cc DEPS ${op_DEPS})
 add_operator(retinanet_detection_output_op extra SRCS retinanet_detection_output_op.cc DEPS ${op_DEPS})
 add_operator(where_index_op extra SRCS where_index_op.cc DEPS ${op_DEPS})
+add_operator(one_hot_op extra SRCS one_hot_op.cc DEPS ${op_DEPS})
 # for content-dnn specific
 add_operator(search_aligned_mat_mul_op extra SRCS search_aligned_mat_mul_op.cc DEPS ${op_DEPS})
 add_operator(search_seq_fc_op extra SRCS search_seq_fc_op.cc DEPS ${op_DEPS})
@@ -175,7 +176,7 @@ add_operator(__xpu__fc_op extra SRCS __xpu__fc_op.cc DEPS ${op_DEPS})
 add_operator(__xpu__resnet_cbam_op extra SRCS __xpu__resnet_cbam_op.cc DEPS ${op_DEPS})
 add_operator(__xpu__search_attention_op extra SRCS __xpu__search_attention_op.cc DEPS ${op_DEPS})
 add_operator(__xpu__mmdnn_op extra SRCS __xpu__mmdnn_op.cc DEPS ${op_DEPS})
-
+lite_cc_test(test_one_hot_op SRCS one_hot_op_test.cc DEPS one_hot_op memory scope ${op_deps} one_hot_compute_host)
 
 if (NOT LITE_WITH_X86)
   lite_cc_test(test_fc_op SRCS fc_op_test.cc DEPS fc_op memory
diff --git a/lite/operators/one_hot_op.cc b/lite/operators/one_hot_op.cc
new file mode 100644
index 0000000000..88b939a0de
--- /dev/null
+++ b/lite/operators/one_hot_op.cc
@@ -0,0 +1,64 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "lite/operators/one_hot_op.h"
+#include "lite/core/op_registry.h"
+namespace paddle {
+namespace lite {
+namespace operators {
+
+bool OneHotOp::CheckShape() const {
+  CHECK_OR_FALSE(param_.X);
+  CHECK_OR_FALSE(param_.Out);
+  return true;
+}
+
+bool OneHotOp::InferShapeImpl() const {
+  auto out_dims = param_.X->dims();
+  CHECK_GE(out_dims.size(), 2);
+  int depth = param_.depth_tensor ? param_.depth_tensor->data<int32_t>()[0]
+                                  : param_.depth;
+  out_dims[out_dims.size() - 1] = depth;
+  param_.Out->Resize(out_dims);
+  param_.Out->set_lod(param_.X->lod());
+  return true;
+}
+
+bool OneHotOp::AttachImpl(const cpp::OpDesc &op_desc, lite::Scope *scope) {
+  auto x = op_desc.Input("X").front();
+  auto out = op_desc.Output("Out").front();
+  param_.X = scope->FindVar(x)->GetMutable<lite::Tensor>();
+  param_.Out = scope->FindMutableTensor(out);
+
+  if (op_desc.HasInput("depth_tensor") &&
+      !op_desc.Input("depth_tensor").empty()) {
+    auto depth_tensor = op_desc.Input("depth_tensor").front();
+    param_.depth_tensor =
+        scope->FindVar(depth_tensor)->GetMutable<lite::Tensor>();
+  }
+
+  if (op_desc.HasAttr("depth")) {
+    param_.depth = op_desc.GetAttr<int>("depth");
+  }
+  if (op_desc.HasAttr("allow_out_of_range")) {
+    param_.allow_out_of_range = op_desc.GetAttr<bool>("allow_out_of_range");
+  }
+  param_.dtype = op_desc.GetAttr<int>("dtype");
+  return true;
+}
+
+}  // namespace operators
+}  // namespace lite
+}  // namespace paddle
+
+REGISTER_LITE_OP(one_hot, paddle::lite::operators::OneHotOp);
diff --git a/lite/operators/one_hot_op.h b/lite/operators/one_hot_op.h
new file mode 100644
index 0000000000..bd0aefc330
--- /dev/null
+++ b/lite/operators/one_hot_op.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include <string>
+#include <vector>
+#include "lite/core/op_lite.h"
+#include "lite/core/scope.h"
+#include "lite/utils/all.h"
+
+namespace paddle {
+namespace lite {
+namespace operators {
+/* note:
+One Hot Operator. This operator creates the one-hot representation of the
+input index values. The following example helps to explain what this
+operator does:
+X is a LoDTensor:
+  X.lod = [[0, 1, 4]]
+  X.shape = [4, 1]
+  X.data = [[1], [1], [3], [0]]
+set depth = 4
+Out is a LoDTensor:
+  Out.lod = [[0, 1, 4]]
+  Out.shape = [4, 4]
+  Out.data = [[0., 1., 0., 0.],
+              [0., 1., 0., 0.],
+              [0., 0., 0., 1.],
+              [1., 0., 0., 0.]] */
+
+class OneHotOp : public OpLite {
+ public:
+  OneHotOp() {}
+  explicit OneHotOp(const std::string &op_type) : OpLite(op_type) {}
+
+  bool CheckShape() const override;
+
+  bool InferShapeImpl() const override;
+
+  bool AttachImpl(const cpp::OpDesc &opdesc, lite::Scope *scope) override;
+
+  void AttachKernel(KernelBase *kernel) override { kernel->SetParam(param_); }
+
+  std::string DebugString() const override { return "one_hot"; }
+
+#ifdef LITE_WITH_PROFILE
+  void GetOpRuntimeInfo(paddle::lite::profile::OpCharacter *ch) {
+    ch->input_shape = ch->DimToStr(param_.X->dims());
+    ch->output_shape = ch->DimToStr(param_.Out->dims());
+    ch->macs = param_.X->numel() * 1.f;
+  }
+#endif
+
+ private:
+  mutable OneHotParam param_;
+};
+
+}  // namespace operators
+}  // namespace lite
+}  // namespace paddle
diff --git a/lite/operators/one_hot_op_test.cc b/lite/operators/one_hot_op_test.cc
new file mode 100644
index 0000000000..5daa837886
--- /dev/null
+++ b/lite/operators/one_hot_op_test.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +#include "lite/operators/one_hot_op.h" +#include +#include "lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace operators { + +TEST(one_hot_op_lite, TestHost) { + // prepare variables + Scope scope; + auto* x = scope.Var("X")->GetMutable(); + auto* depth_tensor = scope.Var("depth_tensor")->GetMutable(); + auto* output = scope.Var("Out")->GetMutable(); + depth_tensor->dims(); + output->dims(); + + // set data + x->Resize(DDim(std::vector({4, 1}))); + auto* x_data = x->mutable_data(); + x_data[0] = 1; + x_data[1] = 1; + x_data[2] = 3; + x_data[3] = 0; + + // prepare op desc + cpp::OpDesc desc; + desc.SetType("one_hot"); + desc.SetInput("X", {"X"}); + desc.SetInput("depth_tensor", {"depth_tensor"}); + desc.SetOutput("Out", {"Out"}); + desc.SetAttr("depth", static_cast(4)); + desc.SetAttr("dtype", static_cast(1)); + desc.SetAttr("allow_out_of_range", static_cast(0)); + OneHotOp one_hot("one_hot"); + one_hot.SetValidPlaces({Place{TARGET(kHost), PRECISION(kAny)}}); + one_hot.Attach(desc, &scope); + auto kernels = one_hot.CreateKernels({Place{TARGET(kHost), PRECISION(kAny)}}); + ASSERT_FALSE(kernels.empty()); +} + +} // namespace operators +} // namespace lite +} // namespace paddle +USE_LITE_KERNEL(one_hot, kHost, kAny, kAny, def); diff --git a/lite/operators/op_params.h b/lite/operators/op_params.h index 1f37aaa5e7..dffa151884 100644 --- a/lite/operators/op_params.h +++ b/lite/operators/op_params.h @@ -1824,6 +1824,15 @@ struct PrintParam : ParamBase { bool is_forward{true}; }; +struct OneHotParam : ParamBase { + const lite::Tensor* X{}; + const lite::Tensor* depth_tensor{nullptr}; + lite::Tensor* Out{}; + int depth; + int dtype; + bool allow_out_of_range; +}; + } // namespace operators } // namespace lite } // namespace paddle -- GitLab