From a17d7be354cf0af0a355cfdeec847d59f77a1f13 Mon Sep 17 00:00:00 2001 From: Wangzheee <634486483@qq.com> Date: Wed, 9 Sep 2020 14:57:12 +0800 Subject: [PATCH] [OP][HOST][KERNEL]Add uniform_random operator, and add host kernel for uniform_random. test=develop (#4264) --- lite/kernels/host/CMakeLists.txt | 1 + lite/kernels/host/uniform_random_compute.cc | 76 +++++++++++++++++++ lite/kernels/host/uniform_random_compute.h | 37 +++++++++ lite/operators/op_params.h | 1 + lite/operators/uniform_random_op.cc | 24 +++++- lite/tests/kernels/CMakeLists.txt | 1 + .../kernels/uniform_random_compute_test.cc | 62 +++++++++++++++ 7 files changed, 201 insertions(+), 1 deletion(-) create mode 100644 lite/kernels/host/uniform_random_compute.cc create mode 100644 lite/kernels/host/uniform_random_compute.h create mode 100644 lite/tests/kernels/uniform_random_compute_test.cc diff --git a/lite/kernels/host/CMakeLists.txt b/lite/kernels/host/CMakeLists.txt index 0cc7b5b302..3cbc585d78 100644 --- a/lite/kernels/host/CMakeLists.txt +++ b/lite/kernels/host/CMakeLists.txt @@ -27,6 +27,7 @@ add_kernel(conditional_block_compute_host Host extra SRCS conditional_block_comp add_kernel(activation_grad_compute_host Host train SRCS activation_grad_compute.cc DEPS ${lite_kernel_deps}) add_kernel(pixel_shuffle_compute_host Host extra SRCS pixel_shuffle_compute.cc DEPS ${lite_kernel_deps}) add_kernel(one_hot_compute_host Host extra SRCS one_hot_compute.cc DEPS ${lite_kernel_deps}) +add_kernel(uniform_random_compute_host Host extra SRCS uniform_random_compute.cc DEPS ${lite_kernel_deps}) if(LITE_BUILD_EXTRA AND LITE_WITH_x86) lite_cc_test(test_where_index_compute_host SRCS where_index_compute.cc DEPS where_index_compute_host) diff --git a/lite/kernels/host/uniform_random_compute.cc b/lite/kernels/host/uniform_random_compute.cc new file mode 100644 index 0000000000..a9cca3aad2 --- /dev/null +++ b/lite/kernels/host/uniform_random_compute.cc @@ -0,0 +1,76 @@ +// Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "lite/kernels/host/uniform_random_compute.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +template <typename T> +void UniformRandomKernelFunctor(Tensor* out, float min, float max, int seed) { + T* p_out_data = out->mutable_data<T>(); + int64_t size = out->numel(); + memset(p_out_data, 0, size * sizeof(T)); + unsigned int out_seed = static_cast<unsigned int>(seed); + std::minstd_rand engine; + if (out_seed == 0) { + out_seed = std::random_device()(); + } + engine.seed(out_seed); + std::uniform_real_distribution<T> dist(static_cast<T>(min), + static_cast<T>(max)); + for (int64_t i = 0; i < size; ++i) { + p_out_data[i] = dist(engine); + } +} + +void UniformRandomCompute::Run() { + auto& param = this->template Param<param_t>(); + switch (param.dtype) { + case static_cast<int>(lite::core::FluidType::FP64): + UniformRandomKernelFunctor<double>( + param.Out, param.min, param.max, param.seed); + break; + case static_cast<int>(lite::core::FluidType::FP32): + UniformRandomKernelFunctor<float>( + param.Out, param.min, param.max, param.seed); + break; + default: + LOG(ERROR) << "Unsupported data type for uniform_random op:" + << param.dtype; + } +} + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle + +REGISTER_LITE_KERNEL(uniform_random, + kHost, + kAny, + kAny, + paddle::lite::kernels::host::UniformRandomCompute, + def) + .BindInput("ShapeTensor", + 
{LiteType::GetTensorTy(TARGET(kHost), + PRECISION(kAny), + DATALAYOUT(kAny))}) + .BindOutput("Out", + {LiteType::GetTensorTy(TARGET(kHost), + PRECISION(kAny), + DATALAYOUT(kAny))}) + .Finalize(); diff --git a/lite/kernels/host/uniform_random_compute.h b/lite/kernels/host/uniform_random_compute.h new file mode 100644 index 0000000000..884a466ce6 --- /dev/null +++ b/lite/kernels/host/uniform_random_compute.h @@ -0,0 +1,37 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+#pragma once +#include <random> +#include "lite/core/kernel.h" +#include "lite/core/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +class UniformRandomCompute + : public KernelLite<TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny)> { + public: + using param_t = operators::UniformRandomParam; + + void Run() override; + + virtual ~UniformRandomCompute() = default; +}; + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle diff --git a/lite/operators/op_params.h b/lite/operators/op_params.h index 586d3d1183..98b08a6b0d 100644 --- a/lite/operators/op_params.h +++ b/lite/operators/op_params.h @@ -747,6 +747,7 @@ struct SGDParam : ParamBase { /// ----------------------- uniform_random operators ---------------------- struct UniformRandomParam : ParamBase { + const lite::Tensor* X{nullptr}; std::vector<int64_t> shape{}; float min{-1.0f}; float max{1.0f}; diff --git a/lite/operators/uniform_random_op.cc b/lite/operators/uniform_random_op.cc index 512648bfe4..602416c80a 100644 --- a/lite/operators/uniform_random_op.cc +++ b/lite/operators/uniform_random_op.cc @@ -23,7 +23,25 @@ namespace operators { bool UniformRandomOpLite::CheckShape() const { return true; } bool UniformRandomOpLite::InferShapeImpl() const { - param_.Out->Resize(param_.shape); + if (param_.X) { + if (param_.X->precision() == PrecisionType::kInt64) { + auto* new_data = param_.X->data<int64_t>(); + std::vector<int64_t> new_shape(new_data, new_data + param_.X->numel()); + param_.Out->Resize(new_shape); + } else if (param_.X->precision() == PrecisionType::kInt32) { + std::vector<int64_t> new_shape; + auto* new_data = param_.X->data<int32_t>(); + for (int i = 0; i < param_.X->numel(); ++i) { + new_shape.push_back(static_cast<int64_t>(*(new_data + i))); + } + param_.Out->Resize(new_shape); + } else { + LOG(ERROR) << "The dtype of shape tensor must be int32 or int64."; + } + } else { + auto new_shape = param_.shape; + param_.Out->Resize(new_shape); + } return true; } @@ -34,6 +52,10 @@ bool UniformRandomOpLite::AttachImpl(const 
cpp::OpDesc& opdesc, param_.max = opdesc.GetAttr<float>("max"); param_.seed = opdesc.GetAttr<int>("seed"); param_.dtype = opdesc.GetAttr<int>("dtype"); + if (opdesc.HasInput("ShapeTensor")) { + auto X = opdesc.Input("ShapeTensor").front(); + param_.X = scope->FindVar(X)->GetMutable<lite::Tensor>(); + } param_.Out = GetMutableVar<lite::Tensor>(scope, opdesc.Output("Out").front()); return true; } diff --git a/lite/tests/kernels/CMakeLists.txt b/lite/tests/kernels/CMakeLists.txt index f9a5e3e40e..c2f0c2ba91 100644 --- a/lite/tests/kernels/CMakeLists.txt +++ b/lite/tests/kernels/CMakeLists.txt @@ -93,4 +93,5 @@ endif() lite_cc_test(test_kernel_matmul_compute SRCS matmul_compute_test.cc DEPS arena_framework ${xpu_kernels} ${npu_kernels} ${huawei_ascend_npu_kernels} ${bm_kernels} ${x86_kernels} ${cuda_kernels} ${arm_kernels} ${lite_ops} ${host_kernels}) lite_cc_test(test_kernel_flatten_compute SRCS flatten_compute_test.cc DEPS arena_framework ${xpu_kernels} ${npu_kernels} ${huawei_ascend_npu_kernels} ${bm_kernels} ${x86_kernels} ${cuda_kernels} ${arm_kernels} ${lite_ops} ${host_kernels}) #lite_cc_test(test_kernel_crf_decoding_compute SRCS crf_decoding_compute_test.cc DEPS arena_framework ${xpu_kernels} ${npu_kernels} ${huawei_ascend_npu_kernels} ${bm_kernels} ${x86_kernels} ${cuda_kernels} ${arm_kernels} ${lite_ops} ${host_kernels}) + lite_cc_test(test_uniform_random_compute SRCS uniform_random_compute_test.cc DEPS arena_framework ${lite_ops} ${host_kernels}) endif() diff --git a/lite/tests/kernels/uniform_random_compute_test.cc b/lite/tests/kernels/uniform_random_compute_test.cc new file mode 100644 index 0000000000..6bf990298c --- /dev/null +++ b/lite/tests/kernels/uniform_random_compute_test.cc @@ -0,0 +1,62 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// #include "lite/kernels/host/uniform_random_compute.h" +// #include +#include "lite/kernels/host/uniform_random_compute.h" +#include <gtest/gtest.h> +#include <vector> +#include "lite/core/context.h" +#include "lite/core/profile/timer.h" +#include "lite/operators/op_params.h" +#include "lite/tests/utils/naive_math_impl.h" +#include "lite/tests/utils/tensor_utils.h" +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +TEST(uniformrandom, test) { + using T = double; + std::vector<int64_t> shape(2, 3); + float min = -5.0f; + float max = 10.0f; + int seed = 0; + int dtype = static_cast<int>(VarDescAPI::VarDataType::FP64); + lite::Tensor x, out; + x.Resize({1, 2}); + auto* x_data = x.mutable_data<int64_t>(); + x_data[0] = 2; + x_data[1] = 2; + out.Resize({x_data[0], x_data[1]}); + UniformRandomCompute uniform_random; + paddle::lite::operators::UniformRandomParam param; + param.X = &x; + param.shape = shape; + param.min = min; + param.max = max; + param.seed = seed; + param.dtype = dtype; + param.Out = &out; + uniform_random.SetParam(param); + uniform_random.Run(); + const double* outdata = out.data<double>(); + for (int i = 0; i < out.numel(); i++) { + LOG(INFO) << "out.data: " << outdata[i]; + } +} + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle -- GitLab