diff --git a/lite/kernels/x86/activation_compute.cc b/lite/kernels/x86/activation_compute.cc
index 9b4c2fadd9ce427db272a9bb0cfd0e0a10716f11..aee6bd6bd3f41972e759fb2b87fb1b1c549975e2 100644
--- a/lite/kernels/x86/activation_compute.cc
+++ b/lite/kernels/x86/activation_compute.cc
@@ -88,3 +88,14 @@ REGISTER_LITE_KERNEL(sigmoid,
     .BindInput("X", {LiteType::GetTensorTy(TARGET(kX86))})
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kX86))})
     .Finalize();
+
+// float
+REGISTER_LITE_KERNEL(relu6,
+                     kX86,
+                     kFloat,
+                     kNCHW,
+                     paddle::lite::kernels::x86::Relu6Compute<float>,
+                     def)
+    .BindInput("X", {LiteType::GetTensorTy(TARGET(kX86))})
+    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kX86))})
+    .Finalize();
diff --git a/lite/kernels/x86/activation_compute.h b/lite/kernels/x86/activation_compute.h
index 520adaf44f808748c75960f88cd07799c9f2d4ed..b76e94398e6824759372bc5eb91ed3cea8acaf6e 100644
--- a/lite/kernels/x86/activation_compute.h
+++ b/lite/kernels/x86/activation_compute.h
@@ -248,6 +248,42 @@ class SoftsignCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
   virtual ~SoftsignCompute() = default;
 };
 
+// relu6(x) = min(max(0, x), 6)
+template <typename T>
+struct Relu6Functor {
+  float threshold;
+  explicit Relu6Functor(float threshold_) : threshold(threshold_) {}
+
+  template <typename Device, typename X, typename Out>
+  void operator()(Device d, X x, Out out) const {
+    out.device(d) =
+        x.cwiseMax(static_cast<T>(0)).cwiseMin(static_cast<T>(threshold));
+  }
+};
+
+template <typename T>
+class Relu6Compute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ActivationParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<operators::ActivationParam>();
+
+    param.Out->template mutable_data<T>();
+    auto X = param.X;
+    auto Out = param.Out;
+    auto place = lite::fluid::EigenDeviceType<TARGET(kX86)>();
+    CHECK(X);
+    CHECK(Out);
+    auto x = lite::fluid::EigenVector<T>::Flatten(*X);
+    auto out = lite::fluid::EigenVector<T>::Flatten(*Out);
+    Relu6Functor<T> functor(param.threshold);
+    functor(place, x, out);
+  }
+
+  virtual ~Relu6Compute() = default;
+};
+
 }  // namespace x86
 }  // namespace kernels
 }  // namespace lite
diff --git a/lite/kernels/x86/reduce_compute.cc b/lite/kernels/x86/reduce_compute.cc
index f95f4cfb881fef329ea940ca8b9fa6b4fd6ff7b6..edeac0a84eb60ca1e34ab6e7437e54ffe8922815 100644
--- a/lite/kernels/x86/reduce_compute.cc
+++ b/lite/kernels/x86/reduce_compute.cc
@@ -23,3 +23,13 @@ REGISTER_LITE_KERNEL(reduce_sum,
     .BindInput("X", {LiteType::GetTensorTy(TARGET(kX86))})
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kX86))})
     .Finalize();
+
+REGISTER_LITE_KERNEL(reduce_mean,
+                     kX86,
+                     kFloat,
+                     kNCHW,
+                     paddle::lite::kernels::x86::ReduceMeanCompute<float>,
+                     def)
+    .BindInput("X", {LiteType::GetTensorTy(TARGET(kX86))})
+    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kX86))})
+    .Finalize();
diff --git a/lite/kernels/x86/reduce_compute.h b/lite/kernels/x86/reduce_compute.h
index 1b7c99eeef9dd80525eb9ed249bdf6ed1e493443..fb02348759014578a1cf7a17c27903ce84dfe54b 100644
--- a/lite/kernels/x86/reduce_compute.h
+++ b/lite/kernels/x86/reduce_compute.h
@@ -31,11 +31,18 @@ struct SumFunctor {
   }
 };
 
-#define HANDLE_DIM(NDIM, RDIM)                                            \
-  if (ndim == NDIM && rdim == RDIM) {                                     \
-    paddle::lite::kernels::x86::                                          \
-        ReduceFunctor<lite::TargetType::kX86, T, NDIM, RDIM, SumFunctor>( \
-            *input, output, dims, keep_dim);                              \
+struct MeanFunctor {
+  template <typename X, typename Y, typename Dim>
+  void operator()(X* x, Y* y, const Dim& dim) {
+    y->device(lite::fluid::EigenDeviceType<TARGET(kX86)>()) = x->mean(dim);
+  }
+};
+
+#define HANDLE_DIM(NDIM, RDIM, FUNCTOR)                                \
+  if (ndim == NDIM && rdim == RDIM) {                                  \
+    paddle::lite::kernels::x86::                                       \
+        ReduceFunctor<lite::TargetType::kX86, T, NDIM, RDIM, FUNCTOR>( \
+            *input, output, dims, keep_dim);                           \
   }
 
 template <typename T>
@@ -64,19 +71,58 @@ class ReduceSumCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     } else {
       int ndim = input->dims().size();
       int rdim = dims.size();
-      HANDLE_DIM(4, 3);
-      HANDLE_DIM(4, 2);
-      HANDLE_DIM(4, 1);
-      HANDLE_DIM(3, 2);
-      HANDLE_DIM(3, 1);
-      HANDLE_DIM(2, 1);
-      HANDLE_DIM(1, 1);
+      HANDLE_DIM(4, 3, SumFunctor);
+      HANDLE_DIM(4, 2, SumFunctor);
+      HANDLE_DIM(4, 1, SumFunctor);
+      HANDLE_DIM(3, 2, SumFunctor);
+      HANDLE_DIM(3, 1, SumFunctor);
+      HANDLE_DIM(2, 1, SumFunctor);
+      HANDLE_DIM(1, 1, SumFunctor);
     }
   }
 
   virtual ~ReduceSumCompute() = default;
 };
 
+template <typename T>
+class ReduceMeanCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
+ public:
+  using param_t = operators::ReduceParam;
+
+  void Run() override {
+    auto& param = *param_.get_mutable<operators::ReduceParam>();
+    // auto& context = ctx_->As<X86Context>();
+    auto* input = param.x;
+    auto* output = param.output;
+    param.output->template mutable_data<T>();
+
+    const auto& dims = param.dim;
+    bool keep_dim = param.keep_dim;
+
+    if (dims.size() == 0) {
+      // Flatten and reduce 1-D tensor
+      auto x = lite::fluid::EigenVector<T>::Flatten(*input);
+      auto out = lite::fluid::EigenScalar<T>::From(output);
+      // auto& place = *platform::CPUDeviceContext().eigen_device();
+      auto reduce_dim = Eigen::array<int, 1>({{0}});
+      MeanFunctor functor;
+      functor(&x, &out, reduce_dim);
+    } else {
+      int ndim = input->dims().size();
+      int rdim = dims.size();
+      HANDLE_DIM(4, 3, MeanFunctor);
+      HANDLE_DIM(4, 2, MeanFunctor);
+      HANDLE_DIM(4, 1, MeanFunctor);
+      HANDLE_DIM(3, 2, MeanFunctor);
+      HANDLE_DIM(3, 1, MeanFunctor);
+      HANDLE_DIM(2, 1, MeanFunctor);
+      HANDLE_DIM(1, 1, MeanFunctor);
+    }
+  }
+
+  virtual ~ReduceMeanCompute() = default;
+};
+
 }  // namespace x86
 }  // namespace kernels
 }  // namespace lite
diff --git a/lite/operators/activation_ops.cc b/lite/operators/activation_ops.cc
index 9b20f4348b4090abfb2138547915e44f7c3418c0..a25297f01206dd157484c720d6dd134186d2a7bd 100644
--- a/lite/operators/activation_ops.cc
+++ b/lite/operators/activation_ops.cc
@@ -89,6 +89,9 @@ bool ActivationOp::AttachImpl(const cpp::OpDesc& opdesc, lite::Scope* scope) {
   } else if (opdesc.Type() == "elu") {
     param_.active_type = lite_api::ActivationType::kElu;
     param_.Elu_alpha = opdesc.GetAttr<float>("alpha");
+  } else if (opdesc.Type() == "relu6") {
+    param_.active_type = lite_api::ActivationType::kRelu6;
+    param_.threshold = opdesc.GetAttr<float>("threshold");
   }
 
   VLOG(4) << "opdesc.Type():" << opdesc.Type();
diff --git a/lite/operators/op_params.h b/lite/operators/op_params.h
index 33da913d2e13d290ef42a40955c7cdc13fd855b3..85d78549706eec8be4ee0029acffd1633e1fb27c 100644
--- a/lite/operators/op_params.h
+++ b/lite/operators/op_params.h
@@ -403,6 +403,8 @@ struct ActivationParam : ParamBase {
   float relu_threshold{1.0f};
   // elu
   float Elu_alpha{1.0f};
+  // relu6
+  float threshold{6.0f};
 
   ///////////////////////////////////////////////////////////////////////////////////
   // get a vector of input tensors
diff --git a/lite/tests/kernels/activation_compute_test.cc b/lite/tests/kernels/activation_compute_test.cc
index fb88f6b553f6eac88845a045531cfe57c174bedf..6799da30da3135b49fd4c423ee094b3c22a73bcb 100644
--- a/lite/tests/kernels/activation_compute_test.cc
+++ b/lite/tests/kernels/activation_compute_test.cc
@@ -58,6 +58,7 @@ class ActivationComputeTester : public arena::TestCase {
   float hard_swish_offset = 3.0;
   float relu_threshold_ = 1.0;
   float elu_alpha_ = 1.0;
+  float threshold_ = 6.0;
   DDim dims_{{1}};
   std::string type_ = "";
   activation_type_test act_type_ = RELU;
@@ -170,7 +171,8 @@ class ActivationComputeTester : public arena::TestCase {
       case RELU6: {
         for (int i = 0; i < dims_.production(); i++) {
           output_data[i] = x_data[i] > 0.f ? x_data[i] : 0.f;
-          output_data[i] = output_data[i] < 6.0 ? output_data[i] : 6.0;
+          output_data[i] =
+              output_data[i] < threshold_ ? output_data[i] : threshold_;
         }
         break;
       }
@@ -273,6 +275,9 @@ class ActivationComputeTester : public arena::TestCase {
     if (act_type_ == ELU) {
       op_desc->SetAttr("alpha", elu_alpha_);
     }
+    if (act_type_ == RELU6) {
+      op_desc->SetAttr("threshold", threshold_);
+    }
   }
 
   void PrepareData() override {
@@ -510,6 +515,8 @@ TEST(Activation_relu6, precision) {
 #elif defined(LITE_WITH_HUAWEI_ASCEND_NPU)
   place = TARGET(kHuaweiAscendNPU);
   abs_error = 1e-2;  // precision_mode default is force_fp16
+#elif defined(LITE_WITH_X86)
+  place = TARGET(kX86);
 #else
   return;
 #endif
diff --git a/lite/tests/kernels/reduce_mean_compute_test.cc b/lite/tests/kernels/reduce_mean_compute_test.cc
index 0d41d251799d3506c77686b4ab9b48e6b1a105d7..d679d027a68735b49255f2c08dfa566a0f50e088 100644
--- a/lite/tests/kernels/reduce_mean_compute_test.cc
+++ b/lite/tests/kernels/reduce_mean_compute_test.cc
@@ -333,9 +333,10 @@ void test_reduce_mean(Place place) {
 }
 
 TEST(ReduceMean, precision) {
-// #ifdef LITE_WITH_X86
-//   Place place(TARGET(kX86));
-// #endif
+#ifdef LITE_WITH_X86
+  Place place(TARGET(kX86));
+  test_reduce_mean(place);
+#endif
 #ifdef LITE_WITH_ARM
   Place place(TARGET(kARM));
   test_reduce_mean(place);
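
For reference, the sketch below (not part of the patch; the helper names Relu6Ref and ReduceMeanInnerRef are illustrative) spells out the semantics the two newly registered x86 kernels are expected to match: relu6 clamps each element to [0, threshold] with threshold defaulting to 6.0f, and reduce_mean averages over the reduced dimensions.

// Standalone reference sketch of the expected kernel semantics.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

// relu6(x) = min(max(0, x), threshold); the patch defaults threshold to 6.0f.
std::vector<float> Relu6Ref(const std::vector<float>& x,
                            float threshold = 6.0f) {
  std::vector<float> out(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    out[i] = std::min(std::max(x[i], 0.0f), threshold);
  }
  return out;
}

// reduce_mean over the inner dimension of a row-major [rows, cols] buffer,
// i.e. the keep_dim == false case with dim = {1}.
std::vector<float> ReduceMeanInnerRef(const std::vector<float>& x,
                                      std::size_t rows,
                                      std::size_t cols) {
  assert(x.size() == rows * cols);
  std::vector<float> out(rows, 0.0f);
  for (std::size_t r = 0; r < rows; ++r) {
    for (std::size_t c = 0; c < cols; ++c) out[r] += x[r * cols + c];
    out[r] /= static_cast<float>(cols);
  }
  return out;
}

int main() {
  std::vector<float> y = Relu6Ref({-1.0f, 3.5f, 7.0f});  // {0.0f, 3.5f, 6.0f}
  std::vector<float> m =
      ReduceMeanInnerRef({1.0f, 2.0f, 3.0f, 4.0f}, 2, 2);  // {1.5f, 3.5f}
  return (y[2] == 6.0f && m[0] == 1.5f && m[1] == 3.5f) ? 0 : 1;
}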