diff --git a/paddle/fluid/lite/CMakeLists.txt b/paddle/fluid/lite/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e0297fcf50cd45f21d2fc592e449871c05f2b02c --- /dev/null +++ b/paddle/fluid/lite/CMakeLists.txt @@ -0,0 +1,11 @@ +cc_library(executor_lite SRCS executor.cc) +cc_library(op_lite SRCS op_lite.cc) +cc_library(memory_lite SRCS memory.cc) +cc_library(tensor_lite SRCS tensor.cc DEPS memory_lite) +cc_library(op_registry_lite SRCS op_registry.cc) + +add_subdirectory(x86) +add_subdirectory(cuda) +add_subdirectory(operators) +add_subdirectory(kernels) +add_subdirectory(model_parser) diff --git a/paddle/fluid/lite/context.cc b/paddle/fluid/lite/context.cc new file mode 100644 index 0000000000000000000000000000000000000000..2dffeb7176aa0fd6360342ac4a7d4f5875ec0123 --- /dev/null +++ b/paddle/fluid/lite/context.cc @@ -0,0 +1,19 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// Created by chunwei on 19-2-22. +// + +#include "context.h" diff --git a/paddle/fluid/lite/context.h b/paddle/fluid/lite/context.h new file mode 100644 index 0000000000000000000000000000000000000000..61b893125a6ec6b287d0d4bf2e5c4e94ac77d730 --- /dev/null +++ b/paddle/fluid/lite/context.h @@ -0,0 +1,72 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include "target_wrapper.h" + +namespace paddle { +namespace lite { + +template +class Context { + public: + using target_wrapper_t = TargetWrapper; + using stream_t = typename TargetWrapper::stream_t; + + Context() = default; + Context(int device_id, stream_t compute_stream, stream_t data_stream) + : device_id_(device_id), + compute_stream_(compute_stream), + data_stream_(data_stream) {} + + void SetDeviceId(int device_id) { device_id_ = device_id; } + void SetComputeStream(stream_t x) { compute_stream_ = x; } + void SetDataStream(stream_t x) { data_stream_ = x; } + + int device_id() const { return device_id_; } + stream_t compute_stream() const { return compute_stream_; } + stream_t data_stream() const { return data_stream_; } + + private: + int device_id_; + stream_t compute_stream_; + stream_t data_stream_; +}; + +class OpContext final { + public: + template + using target_ptr_t = std::unique_ptr>; + + // @param target valid target. + explicit OpContext(TargetType target) + : targets_(std::vector({target})) {} + // @param target valid target. 
+ explicit OpContext(const std::vector& target) : targets_(target) {} + + const std::vector& target() const { return targets_; } + + template + target_ptr_t CreateContext() { + return target_ptr_t(new Context); + } + + private: + std::vector targets_; +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/cuda/CMakeLists.txt b/paddle/fluid/lite/cuda/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..4ec76c2a5ec0d77e3b3be7fcf3e266fa68d1cd7e --- /dev/null +++ b/paddle/fluid/lite/cuda/CMakeLists.txt @@ -0,0 +1 @@ +nv_library(target_wrapper_cuda SRCS target_wrapper.cc) diff --git a/paddle/fluid/lite/cuda/target_wrapper.cc b/paddle/fluid/lite/cuda/target_wrapper.cc new file mode 100644 index 0000000000000000000000000000000000000000..3376a559665328d5c950282f55a0fef7f40a9b82 --- /dev/null +++ b/paddle/fluid/lite/cuda/target_wrapper.cc @@ -0,0 +1,19 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// Created by chunwei on 19-2-23. +// + +#include "target_wrapper.h" diff --git a/paddle/fluid/lite/cuda/target_wrapper.h b/paddle/fluid/lite/cuda/target_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..b1f8bab3bfa4896b68ad833440d94d42b4a4f40d --- /dev/null +++ b/paddle/fluid/lite/cuda/target_wrapper.h @@ -0,0 +1,21 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +namespace paddle { +namespace framework { +namespace lite { +namespace cuda {} // namespace cuda +} // namespace lite +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/lite/executor.cc b/paddle/fluid/lite/executor.cc new file mode 100644 index 0000000000000000000000000000000000000000..ce71e4de2b8f6ee4332a47e7d26876f75e0d75fd --- /dev/null +++ b/paddle/fluid/lite/executor.cc @@ -0,0 +1,13 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/paddle/fluid/lite/executor.h b/paddle/fluid/lite/executor.h new file mode 100644 index 0000000000000000000000000000000000000000..ce71e4de2b8f6ee4332a47e7d26876f75e0d75fd --- /dev/null +++ b/paddle/fluid/lite/executor.h @@ -0,0 +1,13 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. diff --git a/paddle/fluid/lite/kernels/CMakeLists.txt b/paddle/fluid/lite/kernels/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1401be5e5dc4e520c09a4ffcab8902e43ae7712d --- /dev/null +++ b/paddle/fluid/lite/kernels/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(host) +add_subdirectory(arm) diff --git a/paddle/fluid/lite/kernels/arm/CMakeLists.txt b/paddle/fluid/lite/kernels/arm/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/paddle/fluid/lite/kernels/host/CMakeLists.txt b/paddle/fluid/lite/kernels/host/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..87952539c285769d31fd496ca8afce1f76420810 --- /dev/null +++ b/paddle/fluid/lite/kernels/host/CMakeLists.txt @@ -0,0 +1,2 @@ +cc_library(fc_compute_host SRCS fc_compute.cc DEPS tensor_lite) +cc_library(relu_compute_host SRCS relu_compute.cc DEPS tensor_lite) diff --git a/paddle/fluid/lite/kernels/host/fc_compute.cc b/paddle/fluid/lite/kernels/host/fc_compute.cc new file mode 100644 index 0000000000000000000000000000000000000000..dda42ba3e1ce1a7ab83a728d66a667a4f260f763 --- /dev/null +++ b/paddle/fluid/lite/kernels/host/fc_compute.cc @@ -0,0 +1,55 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/kernels/host/fc_compute.h" +#include +#include "paddle/fluid/lite/op_registry.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +// NOTE should use pure std C++ implementation. 
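The NOTE above asks for a dependency-free implementation; a minimal sketch of such a pure std C++ fallback (illustrative only, not part of the patch; the helper name is made up, and the Run() that follows uses Eigen instead) could look like:

    // Naive FC: output = input * w (+ bias), with input MxK, w KxN and an
    // optional bias of length N, all row-major float buffers.
    inline void naive_fc(const float* input, const float* w, const float* bias,
                         float* output, int M, int K, int N) {
      for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
          float sum = bias ? bias[n] : 0.f;
          for (int k = 0; k < K; ++k) sum += input[m * K + k] * w[k * N + n];
          output[m * N + n] = sum;
        }
      }
    }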
+void FcCompute::Run() { + using matrix_t = Eigen::Matrix; + using matrix_map_t = Eigen::Map; + + auto& param = this->param(); + + CHECK_EQ(param.in_mat_dims.size(), 2UL); + CHECK_EQ(param.output->dims().size(), 2UL); + Eigen::Map input(param.input->data(), + param.in_mat_dims[0], param.in_mat_dims[1]); + Eigen::Map weight(param.w->data(), param.w->dims()[0], + param.w->dims()[1]); + matrix_map_t output(param.output->mutable_data(), + param.output->dims()[0], param.output->dims()[1]); + + output = weight.transpose() * input; + + if (param.bias) { + Eigen::Map bias(param.bias->data(), + param.bias->dims()[0], + param.bias->dims()[1]); + output += bias; + } +} + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle + +REGISTER_LITE_KERNEL(fc, kHost, kFloat, paddle::lite::kernels::host::FcCompute); diff --git a/paddle/fluid/lite/kernels/host/fc_compute.h b/paddle/fluid/lite/kernels/host/fc_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..1a7e8ae74db1b93a489a7e1ac513e09918a67685 --- /dev/null +++ b/paddle/fluid/lite/kernels/host/fc_compute.h @@ -0,0 +1,36 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/lite/op_kernel.h" +#include "paddle/fluid/lite/operators/fc_op.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +class FcCompute final : public OpKernel { + public: + using param_t = operators::FcParam; + + void Run() override; + + virtual ~FcCompute() = default; +}; + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/kernels/host/relu_compute.cc b/paddle/fluid/lite/kernels/host/relu_compute.cc new file mode 100644 index 0000000000000000000000000000000000000000..e352e493877cda87cbe6c38f59d0d57358bacec7 --- /dev/null +++ b/paddle/fluid/lite/kernels/host/relu_compute.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/kernels/relu_compute.h" diff --git a/paddle/fluid/lite/kernels/host/relu_compute.h b/paddle/fluid/lite/kernels/host/relu_compute.h new file mode 100644 index 0000000000000000000000000000000000000000..70e19e3898b112b2e0ada2d72b362b223e8a7326 --- /dev/null +++ b/paddle/fluid/lite/kernels/host/relu_compute.h @@ -0,0 +1,30 @@ +// Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/lite/op_kernel.h" + +namespace paddle { +namespace lite { +namespace kernels { +namespace host { + +class ReluCompute final : public OpKernel { + public: +}; + +} // namespace host +} // namespace kernels +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/memory.cc b/paddle/fluid/lite/memory.cc new file mode 100644 index 0000000000000000000000000000000000000000..2b85d763da6b31d27a2d8c9fec30ab9a7dfbaed0 --- /dev/null +++ b/paddle/fluid/lite/memory.cc @@ -0,0 +1,21 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/memory.h" + +namespace paddle { +namespace framework { +namespace lite {} // namespace lite +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/lite/memory.h b/paddle/fluid/lite/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..65801c7bdded66666d5c14fa20845483ae16e6d1 --- /dev/null +++ b/paddle/fluid/lite/memory.h @@ -0,0 +1,94 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
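ReluCompute in kernels/host/relu_compute.h above is declared with an empty body. A sketch of the Run() it will presumably grow (hypothetical, not part of the patch; it assumes the kernel reads an operators::ReluParam analogous to FcParam, a struct declared further down in operators/relu_op.h):

    // Hypothetical Run() for the host ReluCompute kernel declared above.
    // void ReluCompute::Run() {
    //   auto& param = this->param<operators::ReluParam>();
    //   const float* x = param.input->data<float>();
    //   float* out = param.output->mutable_data<float>();
    //   const int n = product(param.input->dims());
    //   for (int i = 0; i < n; ++i) out[i] = x[i] > 0.f ? x[i] : 0.f;
    // }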
+ +#pragma once +#include +#include "target_wrapper.h" + +namespace paddle { +namespace lite { + +void* TargetMalloc(TargetType target, size_t size) { + void* data{nullptr}; + switch (static_cast(target)) { + case static_cast(TargetType::kX86): + data = TargetWrapper::Malloc(size); + break; + case static_cast(TargetType::kCUDA): + data = TargetWrapper::Malloc(size); + break; + case static_cast(TargetType::kARM): + data = TargetWrapper::Malloc(size); + break; + case static_cast(TargetType::kHost): + data = TargetWrapper::Malloc(size); + break; + default: + LOG(FATAL) << "Unknown type"; + } + return data; +} + +void TargetFree(TargetType target, void* data) { + switch (static_cast(target)) { + case static_cast(TargetType::kX86): + TargetWrapper::Free(data); + break; + case static_cast(TargetType::kCUDA): + TargetWrapper::Free(data); + break; + case static_cast(TargetType::kARM): + TargetWrapper::Free(data); + break; + default: + LOG(FATAL) << "Unknown type"; + } +} + +// Memory buffer manager. +class Buffer { + public: + Buffer(TargetType target, size_t size) : space_(size), target_(target) {} + + void* data() const { return data_; } + + void ResetLazy(TargetType target, size_t size) { + if (target != target_ || space_ < size) { + Free(); + } + + if (size < space_) return; + data_ = TargetMalloc(target, size); + target_ = target; + space_ = size; + } + + void ResizeLazy(size_t size) { ResetLazy(target_, size); } + + void Free() { + if (space_ > 0) { + TargetFree(target_, data_); + } + target_ = TargetType::kHost; + space_ = 0; + } + + private: + size_t space_{0}; + void* data_{nullptr}; + TargetType target_{TargetType::kHost}; +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/model_parser/CMakeLists.txt b/paddle/fluid/lite/model_parser/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..4f72ebd08593cf9fc829e3d271512073a9e9d333 --- /dev/null +++ b/paddle/fluid/lite/model_parser/CMakeLists.txt @@ -0,0 +1 @@ +cc_library(model_parser SRCS model_parser.cc) diff --git a/paddle/fluid/lite/model_parser/model_parser.cc b/paddle/fluid/lite/model_parser/model_parser.cc new file mode 100644 index 0000000000000000000000000000000000000000..5a5fd1bcf9e7711d1777995fd171ead656b9edc7 --- /dev/null +++ b/paddle/fluid/lite/model_parser/model_parser.cc @@ -0,0 +1,19 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +// Created by chunwei on 19-2-25. +// + +#include "model_parser.h" diff --git a/paddle/fluid/lite/model_parser/model_parser.h b/paddle/fluid/lite/model_parser/model_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..75f13fca2a3f353e1507cd63e3e73477e8e1b047 --- /dev/null +++ b/paddle/fluid/lite/model_parser/model_parser.h @@ -0,0 +1,30 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
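A usage sketch for the Buffer class defined in memory.h above (illustrative only, not part of the patch). Note that allocation goes through TargetWrapper<...>::Malloc, and the primary TargetWrapper template earlier in this patch still returns nullptr, so real memory only appears once a concrete target specialization provides Malloc/Free:

    // Lazily grow a host buffer; Free() releases it and resets the bookkeeping.
    paddle::lite::Buffer buf(paddle::lite::TargetType::kHost, 0 /*size*/);
    buf.ResetLazy(paddle::lite::TargetType::kHost, 64);  // allocates 64 bytes
    buf.ResizeLazy(32);    // smaller than the current space: keeps the 64-byte block
    buf.ResizeLazy(256);   // larger: frees and reallocates a bigger block
    buf.Free();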
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file contains model format related operations, such as load a model, +// parse an operator definitions and so on. + +#include +#include + +namespace paddle { +namespace lite { + +void LoadProgram(const std::string& path); +void LoadParams(const std::string& path); + +void LoadModel(const std::string& model_dir); + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/op_kernel.h b/paddle/fluid/lite/op_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..832a257fb150cd3c0310264303aa3575b8f8cbe8 --- /dev/null +++ b/paddle/fluid/lite/op_kernel.h @@ -0,0 +1,59 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/lite/context.h" +#include "paddle/fluid/lite/target_wrapper.h" +#include "paddle/fluid/lite/utils/all.h" + +namespace paddle { +namespace lite { + +// Light-weight kernel implementation. +// The OpKernel is designed to implement the specific algorithm on a target +// device. +template +class OpKernel { + public: + using context_t = Context; + using context_ptr_t = std::unique_ptr; + + OpKernel() = default; + + void SetContext(context_ptr_t&& ctx) { context_ = std::move(ctx); } + + void SetParam(any param) { param_ = param; } + + template + Param& param() const { + return *any_cast(¶m_); + } + + virtual void Run() { CHECK(false) << "Not Implemented"; } + + virtual ~OpKernel() = default; + + protected: + context_ptr_t context_; + mutable any param_; +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/op_lite.cc b/paddle/fluid/lite/op_lite.cc new file mode 100644 index 0000000000000000000000000000000000000000..f277ecb580015448f60c939c400be4c10b83547b --- /dev/null +++ b/paddle/fluid/lite/op_lite.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "op_lite.h" diff --git a/paddle/fluid/lite/op_lite.h b/paddle/fluid/lite/op_lite.h new file mode 100644 index 0000000000000000000000000000000000000000..cf94ad26c89c5aee7a31b0d5936ceab42c66e9f4 --- /dev/null +++ b/paddle/fluid/lite/op_lite.h @@ -0,0 +1,82 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include +#include "context.h" +#include "op_kernel.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/variable.h" + +namespace paddle { +namespace lite { + +using any_t = boost::variant; +using anys_t = std::map; + +// For registry factory. +struct Registry { + void Touch() {} +}; + +/** + * The base class of an light-weight operators, currently just used in inference + * to eliminate overhead of some operations in current framework. + * + * The Operator are designed as follows: + * - it can has some members to hold the argument addresses, + * - it should act just like a function call, no more logic should included. + */ +class OpLite : public Registry { + public: + enum class KernelStrategy { + // Return the user specified one. + kStatic = 0, + // Specify the expected kernel externally. + kSpecified, + // Run each kernel to evaluate and get the best kernel. + kRuntime, + }; + + OpLite() {} + OpLite(std::unique_ptr &&x) : op_context_(std::move(x)) {} + + virtual bool CheckShape() const { return true; } + virtual bool InferShape() const { return true; } + virtual bool Run() = 0; + virtual bool Build(const framework::OpDesc &opdesc, + framework::Scope *scope) = 0; + virtual std::string DebugString() const = 0; + + virtual void StaticPickKernel(const std::vector &valid_targets) = 0; + + void PickBestKernel(const std::vector &valid_places, + KernelStrategy kernel_strategy = KernelStrategy::kStatic); + + // Create all the kernels for the valid targets. + void CreateKernels(); + + virtual ~OpLite() = default; + + protected: + std::unique_ptr op_context_; +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/op_registry.cc b/paddle/fluid/lite/op_registry.cc new file mode 100644 index 0000000000000000000000000000000000000000..23c940458ff53fa03aecdd9549d5c0f2a90df32e --- /dev/null +++ b/paddle/fluid/lite/op_registry.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "op_registry.h" \ No newline at end of file diff --git a/paddle/fluid/lite/op_registry.h b/paddle/fluid/lite/op_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..1e23d58604561b1e12a3d653c74768857004ced0 --- /dev/null +++ b/paddle/fluid/lite/op_registry.h @@ -0,0 +1,136 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include "paddle/fluid/lite/op_kernel.h" +#include "paddle/fluid/lite/op_lite.h" +#include "paddle/fluid/lite/target_wrapper.h" +#include "paddle/fluid/lite/utils/all.h" + +namespace paddle { +namespace lite { + +using KernelFunc = std::function; +using KernelFuncCreator = std::function()>; + +class LiteOpRegistry final : public Factory { + public: + static LiteOpRegistry &Global() { + static auto *x = new LiteOpRegistry; + return *x; + } + + private: + LiteOpRegistry() = default; +}; + +template +class OpLiteRegistor : public Registor { + public: + OpLiteRegistor(const std::string &op_type) + : Registor([&] { + LiteOpRegistry::Global().Register( + op_type, []() -> std::unique_ptr { + return std::unique_ptr(new OpClass); + }); + }) {} +}; + +template +class KernelRegistryForTarget : public Factory> {}; + +class KernelRegistry final { + public: + KernelRegistry() { +#define INIT_FOR(target__, precision__) \ + registries_[KernelRegistry::GetKernelOffset()] = \ + &KernelRegistryForTarget::Global(); + // Currently, just register 2 kernel targets. + INIT_FOR(kARM, kFloat); + INIT_FOR(kHost, kFloat); +#undef INIT_FOR + } + + static KernelRegistry &Global() { + static auto *x = new KernelRegistry; + return *x; + } + + template + void Register(const std::string &name, + typename KernelRegistryForTarget::creator_t + &&creator) { + using kernel_registor_t = KernelRegistryForTarget; + any_cast( + registries_[GetKernelOffset()]) + ->Register(name, std::move(creator)); + } + + // Get a kernel registry offset in all the registries. 
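    // Worked example of the offset formula below (illustration only, not part
    // of the patch): with the enums in target_wrapper.h, kNumTargets == 4 and
    // kNumPrecisions == 2, so the formula maps (kHost, kFloat) to 4*0 + 0 == 0
    // and (kARM, kFloat) to 4*3 + 0 == 12. If registries_ is meant to hold
    // kNumTargets * kNumPrecisions == 8 slots, a precision-major index such as
    //   static_cast<int>(Target) * kNumPrecisions + static_cast<int>(Precision)
    // would be the variant that stays in bounds for every pair.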
+ template + static constexpr int GetKernelOffset() { + return kNumTargets * static_cast(Target) + static_cast(Precision); + } + + private: + std::array registries_; +}; + +template +class KernelRegistor : public lite::Registor { + public: + KernelRegistor(const std::string op_type) + : Registor([&] { + KernelRegistry::Global().Register( + op_type, [&]() -> std::unique_ptr { + return std::unique_ptr(new KernelType); + }); + }) {} +}; + +} // namespace lite +} // namespace paddle + +// Operator registry +#define LITE_OP_REGISTER_INSTANCE(op_type__) op_type__##__registry__instance__ +#define LITE_OP_REGISTER_FAKE(op_type__) op_type__##__registry__ +#define REGISTER_LITE_OP(op_type__, OpClass) \ + static paddle::lite::OpLiteRegistor LITE_OP_REGISTER_INSTANCE( \ + op_type__)(#op_type__); + +#define USE_LITE_OP(op_type__) \ + int LITE_OP_REGISTER_FAKE(op_type__)((unused)) = \ + LITE_OP_REGISTER_INSTANCE(op_type__).Touch(); + +// Kernel registry +#define LITE_KERNEL_REGISTER(op_type__, target__, precision__) \ + op_type__##target__##precision__##__registor__ +#define LITE_KERNEL_REGISTER_INSTANCE(op_type__, target__, precision__) \ + op_type__##target__##precision__##__registor__instance__ +#define LITE_KERNEL_REGISTER_FAKE(op_type__, target__, precision__) \ + LITE_KERNEL_REGISTER_INSTANCE(op_type__, target__, precision__)##__fake__ + +#define REGISTER_LITE_KERNEL(op_type__, target__, precision__, KernelClass) \ + static paddle::lite::KernelRegistor \ + LITE_KERNEL_REGISTER_INSTANCE(op_type__, target__, \ + precision__)(#op_type__); + +#define USE_LITE_KERNEL(op_type__, target__, precision__) \ + int LITE_KERNEL_REGISTER_FAKE(op_type__, target__, precision__)((unused)) = \ + LITE_KERNEL_REGISTER(op_type__, target__, precision__).Touch(); diff --git a/paddle/fluid/lite/operators/CMakeLists.txt b/paddle/fluid/lite/operators/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..cbcc848f7820d28fa0d7166e133e1f3532824763 --- /dev/null +++ b/paddle/fluid/lite/operators/CMakeLists.txt @@ -0,0 +1,2 @@ +cc_library(fc_op_lite SRCS fc_op.cc DEPS op_lite) +cc_library(relu_op_lite SRCS relu_op.cc DEPS op_lite) diff --git a/paddle/fluid/lite/operators/fc_op.cc b/paddle/fluid/lite/operators/fc_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..245941832b2f725afde0803383154c6a7fa2f8a3 --- /dev/null +++ b/paddle/fluid/lite/operators/fc_op.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "fc_op.h" +#include "paddle/fluid/lite/op_registry.h" + +namespace paddle { +namespace lite { +namespace operators { + +bool FcOpLite::CheckShape() const { + CHECK_OR_FALSE(param_.input); + CHECK_OR_FALSE(param_.output); + CHECK_OR_FALSE(param_.w); + // bias is optional. 
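  // Shape note (illustration only): with the input flattened to an [M, K]
  // matrix, the checks below expect w to be [K, N] and bias, when present, to
  // be either [1, N] or [N]; e.g. input [32, 100], w [100, 10], bias [10].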
+ + const auto input_dims = param_.input->dims(); + const auto w_dims = param_.w->dims(); + + if (param_.bias) { + const auto bias_dims = param_.bias->dims(); + if (bias_dims.size() == 2) { + CHECK_EQ_OR_FALSE(bias_dims[0], 1); + CHECK_EQ_OR_FALSE(bias_dims[1], w_dims[1]); + } else if (bias_dims.size() == 1) { + CHECK_EQ_OR_FALSE(bias_dims[0], w_dims[1]); + } + } + + CHECK_EQ_OR_FALSE(w_dims.size(), 2UL); + CHECK_GT_OR_FALSE(input_dims.size(), + static_cast(param_.in_num_col_dims)); + + param_.in_mat_dims = lite::flatten_to_2d(input_dims, param_.in_num_col_dims); + CHECK_EQ_OR_FALSE(param_.in_mat_dims[1], w_dims[0]); + + return true; +} + +bool FcOpLite::InferShape() const { + const auto input_dims = param_.input->dims(); + const auto w_dims = param_.w->dims(); + + // Set output dims + std::vector output_dims(param_.in_num_col_dims + 1, 0); + for (int i = 0; i < param_.in_num_col_dims; ++i) { + output_dims[i] = input_dims[i]; + } + output_dims.back() = w_dims[1]; + param_.output->Resize(output_dims); + + // share LoD + // param_.output->set_lod(param_.input->lod()); + return true; +} + +} // namespace operators +} // namespace lite +} // namespace paddle + +REGISTER_LITE_OP(fc, paddle::lite::operators::FcOpLite); diff --git a/paddle/fluid/lite/operators/fc_op.h b/paddle/fluid/lite/operators/fc_op.h new file mode 100644 index 0000000000000000000000000000000000000000..588b5e7c112b4b0fa0adbeade5fd74e843bda4a8 --- /dev/null +++ b/paddle/fluid/lite/operators/fc_op.h @@ -0,0 +1,60 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include "paddle/fluid/lite/op_lite.h" +#include "paddle/fluid/lite/tensor.h" +#include "paddle/fluid/lite/utils/all.h" + +namespace paddle { +namespace lite { +namespace operators { + +struct FcParam { + Tensor* input{nullptr}; + Tensor* w{nullptr}; + Tensor* bias{nullptr}; + Tensor* output{nullptr}; + // the input matrix dimentions. + lite::DDim in_mat_dims; + int in_num_col_dims{0}; +}; + +class FcOpLite : public OpLite { + public: + FcOpLite() {} + + bool CheckShape() const override; + + bool InferShape() const override; + + bool Run() override { return false; } + + bool Build(const framework::OpDesc& opdesc, + framework::Scope* scope) override { + return false; + } + + std::string DebugString() const override { return "fc"; } + + void StaticPickKernel(const std::vector& valid_targets) override {} + + private: + mutable FcParam param_; +}; + +} // namespace operators +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/operators/relu_op.cc b/paddle/fluid/lite/operators/relu_op.cc new file mode 100644 index 0000000000000000000000000000000000000000..675acf61d130df7cc78ceba8574f8a5deb73e49a --- /dev/null +++ b/paddle/fluid/lite/operators/relu_op.cc @@ -0,0 +1,44 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
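The fc operator is registered just above via REGISTER_LITE_OP (and its host kernel earlier via REGISTER_LITE_KERNEL in fc_compute.cc). Per the USE_* macros in op_registry.h, a consumer that wants those registrations linked in would write something along these lines (illustrative only, not part of the patch):

    // Force the registration translation units to be linked.
    USE_LITE_OP(fc);
    USE_LITE_KERNEL(fc, kHost, kFloat);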
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/lite/operators/relu_op.h" +#include "paddle/fluid/lite/op_registry.h" + +namespace paddle { +namespace lite { +namespace operators { + +bool ReluOp::CheckShape() const { return true; } +bool ReluOp::InferShape() const { + CHECK_OR_FALSE(param_.input); + CHECK_OR_FALSE(param_.output); + // TODO(Superjomn) Enable data sharing. + param_.output->Resize(param_.input->dims()); + // param_.output->ShareDataWith(*param_.input); + // share lod + // param_.output->set_lod(param_.input->lod()); + return true; +} + +bool ReluOp::Run() { return false; } + +bool ReluOp::Build(const framework::OpDesc &opdesc, framework::Scope *scope) { + return false; +} + +REGISTER_LITE_OP(relu, ReluOp); + +} // namespace operators +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/operators/relu_op.h b/paddle/fluid/lite/operators/relu_op.h new file mode 100644 index 0000000000000000000000000000000000000000..0a9a6c9399c3ce1e5eb32cc34586b3c705fafce7 --- /dev/null +++ b/paddle/fluid/lite/operators/relu_op.h @@ -0,0 +1,52 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include "paddle/fluid/lite/op_lite.h" +#include "paddle/fluid/lite/tensor.h" +#include "paddle/fluid/lite/utils/all.h" + +namespace paddle { +namespace lite { +namespace operators { + +struct ReluParam { + Tensor* input{nullptr}; + Tensor* output{nullptr}; +}; + +class ReluOp : public OpLite { + public: + ReluOp() {} + + bool CheckShape() const override; + + bool InferShape() const override; + + bool Run() override; + + bool Build(const framework::OpDesc& opdesc, framework::Scope* scope) override; + + std::string DebugString() const override { return "tanh"; } + + void StaticPickKernel(const std::vector& valid_targets) override {} + + private: + mutable ReluParam param_; +}; + +} // namespace operators +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/target_wrapper.h b/paddle/fluid/lite/target_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..f7bcd3793aea04e285f026c6d691b97bac05e9bf --- /dev/null +++ b/paddle/fluid/lite/target_wrapper.h @@ -0,0 +1,87 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include + +namespace paddle { +namespace lite { + +enum class TargetType { kHost = 0, kX86, kCUDA, kARM, kLastAsPlaceHolder }; +#define TARGET(item__) paddle::lite::TargetType::item__ +#define TARGET_VAL(item__) static_cast(TARGET(item__)) + +constexpr int kNumTargets = TARGET_VAL(kLastAsPlaceHolder) - TARGET_VAL(kHost); + +/* +template +struct Target {}; + +using Host = Target; +using X86 = Target; +using CUDA = Target; +using ARM = Target; + */ + +enum class PrecisionType { kFloat = 0, kInt8, kLastAsPlaceHolder }; + +#define PRECISION(item__) paddle::lite::PrecisionType::item__ +#define PRECISION_VAL(item__) static_cast(PRECISION(item__)) +constexpr int kNumPrecisions = + PRECISION_VAL(kLastAsPlaceHolder) - PRECISION_VAL(kFloat); + +// Event sync for multi-stream devices like CUDA and OpenCL. +template +class Event {}; + +// Memory copy directions. +enum class IoDirection { + HtoH = 0, + HtoD, + DtoH, +}; + +// This interface should be specified by each kind of target. +template +class TargetWrapper { + public: + using stream_t = int; + using event_t = Event; + + static size_t num_devices() { return 0; } + static size_t maximum_stream() { return 0; } + + static void CreateStream(stream_t* stream) {} + static void DestroyStream(const stream_t& stream) {} + + static void CreateEvent(event_t* event) {} + static void DestroyEvent(const event_t& event) {} + + static void RecordEvent(const event_t& event) {} + static void SyncEvent(const event_t& event) {} + + static void StreamSync(const stream_t& stream) {} + + static void* Malloc(size_t size) { return nullptr; } + static void Free(void* ptr) {} + + static void MemcpySync(void* dst, void* src, size_t size, IoDirection dir) {} + static void MemcpyAsync(void* dst, void* src, size_t size, + const stream_t& stream, IoDirection dir) { + MemcpySync(dst, src, size, dir); + } +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/tensor.cc b/paddle/fluid/lite/tensor.cc new file mode 100644 index 0000000000000000000000000000000000000000..28d76f0793f439764bf25d3d7cc46636e9447536 --- /dev/null +++ b/paddle/fluid/lite/tensor.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "tensor.h" diff --git a/paddle/fluid/lite/tensor.h b/paddle/fluid/lite/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..86b625bd239cecc46145fad270517cdbee8c3875 --- /dev/null +++ b/paddle/fluid/lite/tensor.h @@ -0,0 +1,84 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include "memory.h" + +namespace paddle { +namespace lite { + +template +class EventTree { + public: + using event_t = Event; + + void AddChild(const event_t& event) { children_.push_back(event); } + + void Sync() { + for (auto& event : children_) { + TargetWrapper::SyncEvent(event); + } + } + + private: + std::vector children_; +}; + +using DDim = std::vector; +DDim SliceDims(const DDim& dims, int begin, int end) { + return DDim(dims.begin() + begin, dims.begin() + end - 1); +} + +int product(const DDim& dims) { + return std::accumulate(dims.begin(), dims.end(), 1, + [](int a, int b) { return a * b; }); +} + +DDim flatten_to_2d(const DDim& dims, int col) { + return DDim({product(SliceDims(dims, 0, col)), + product(SliceDims(dims, col, dims.size()))}); +} + +// A light-weight tensor implementation. +class Tensor { + public: + void SyncEventTree(); + + template + const T* data() const { + return static_cast(buffer_.data()); + } + + void Resize(const DDim& ddim) { dims_ = ddim; } + + const DDim& dims() const { return dims_; } + + template + T* mutable_data() { + buffer_.ResetLazy(target_, product(dims_)); + return static_cast(buffer_.data()); + } + + bool IsInitialized() const { return buffer_.data(); } + + private: + TargetType target_{TargetType::kHost}; + DDim dims_; + Buffer buffer_; +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/utils/all.h b/paddle/fluid/lite/utils/all.h new file mode 100644 index 0000000000000000000000000000000000000000..79f7e9e0eb5ab627f421e2f904307bc1fce1a923 --- /dev/null +++ b/paddle/fluid/lite/utils/all.h @@ -0,0 +1,20 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/lite/utils/any.h" +#include "paddle/fluid/lite/utils/check.h" +#include "paddle/fluid/lite/utils/factory.h" +#include "paddle/fluid/lite/utils/macros.h" diff --git a/paddle/fluid/lite/utils/any.h b/paddle/fluid/lite/utils/any.h new file mode 100644 index 0000000000000000000000000000000000000000..20c4a6faadbf93d79d66c00e61caf86e9837680d --- /dev/null +++ b/paddle/fluid/lite/utils/any.h @@ -0,0 +1,129 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
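A usage sketch for the Tensor class defined in tensor.h above (illustrative only, not part of the patch). Note that mutable_data<T>() sizes the underlying Buffer with product(dims()), i.e. the element count, and relies on the host TargetWrapper for the actual allocation:

    paddle::lite::Tensor t;
    t.Resize({2, 3});                         // DDim is a std::vector, per the alias above
    float* out = t.mutable_data<float>();     // lazily backs the tensor with a host buffer
    const float* in = t.data<float>();        // typed, read-only view of the same storage
    int n = paddle::lite::product(t.dims());  // 6 elements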
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include + +// This is an equivalent implementation of boost::any. We implement this to +// avoid including the whole boost library and keep the inference library small. +// These code references https://gist.github.com/shoooe/9202235 + +namespace paddle { +namespace lite { + +class any; +template +Type any_cast(any&); +template +Type any_cast(const any&); +template +Type* any_cast(any*); +template +const Type* any_cast(const any*); +struct bad_any_cast : public std::bad_cast {}; + +class any { + public: + template + friend Type any_cast(any&); + + template + friend Type any_cast(const any&); + + template + friend Type* any_cast(any*); + + template + friend const Type* any_cast(const any*); + + any() : ptr(nullptr) {} + explicit any(any&& x) : ptr(std::move(x.ptr)) {} + + explicit any(const any& x) { + if (x.ptr) ptr = x.ptr->clone(); + } + + template + explicit any(const Type& x) + : ptr(new concrete::type>(x)) {} + any& operator=(any&& rhs) { + ptr = std::move(rhs.ptr); + return (*this); + } + any& operator=(const any& rhs) { + ptr = std::move(any(rhs).ptr); + return (*this); + } + template + any& operator=(T&& x) { + ptr.reset(new concrete::type>( + typename std::decay::type(x))); + return (*this); + } + template + any& operator=(const T& x) { + ptr.reset(new concrete::type>( + typename std::decay::type(x))); + return (*this); + } + void clear() { ptr.reset(nullptr); } + bool empty() const { return ptr == nullptr; } + const std::type_info& type() const { + return (!empty()) ? ptr->type() : typeid(void); + } + + private: + struct placeholder { + virtual std::unique_ptr clone() const = 0; + virtual const std::type_info& type() const = 0; + virtual ~placeholder() {} + }; + + template + struct concrete : public placeholder { + explicit concrete(T&& x) : value(std::move(x)) {} + explicit concrete(const T& x) : value(x) {} + virtual std::unique_ptr clone() const override { + return std::unique_ptr(new concrete(value)); + } + virtual const std::type_info& type() const override { return typeid(T); } + T value; + }; + + std::unique_ptr ptr; +}; + +template +Type any_cast(any& val) { + if (val.ptr->type() != typeid(Type)) throw bad_any_cast(); + return static_cast*>(val.ptr.get())->value; +} +template +Type any_cast(const any& val) { + return any_cast(any(val)); +} +template +Type* any_cast(any* ptr) { + return dynamic_cast(ptr->ptr.get()); +} +template +const Type* any_cast(const any* ptr) { + return dynamic_cast(ptr->ptr.get()); +} + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/utils/check.h b/paddle/fluid/lite/utils/check.h new file mode 100644 index 0000000000000000000000000000000000000000..ee0fa1f2d2e661eeba92e1f3176e726b65e822b4 --- /dev/null +++ b/paddle/fluid/lite/utils/check.h @@ -0,0 +1,41 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
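A usage sketch for the lite::any replacement defined in utils/any.h above (illustrative only, not part of the patch):

    paddle::lite::any holder(42);                  // wraps an int by value
    int v = paddle::lite::any_cast<int>(holder);   // v == 42
    // any_cast<double>(holder) would throw paddle::lite::bad_any_cast,
    // since the stored type_info does not match the requested type.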
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#define CHECK_OR_FALSE(cond) \ + if (!(cond)) { \ + LOG(ERROR) << #cond << " test error!"; \ + return false; \ + } +#define CHECK_EQ_OR_FALSE(a__, b__) \ + if ((a__) != (b__)) { \ + LOG(ERROR) << #a__ << " == " << #b__ << " check failed!"; \ + LOG(ERROR) << a__ << " != " << b__; \ + return false; \ + } + +#define CHECK_GT_OR_FALSE(a__, b__) \ + if (!((a__) > (b__))) { \ + LOG(ERROR) << #a__ << " > " << #b__ << " check failed!"; \ + LOG(ERROR) << a__ << " <= " << b__; \ + return false; \ + } + +#define CHECK_GE_OR_FALSE(a__, b__) \ + if (!((a__) >= (b__))) { \ + LOG(ERROR) << #a__ << " >= " << #b__ << " check failed!"; \ + LOG(ERROR) << a__ << " < " << b__; \ + return false; \ + } diff --git a/paddle/fluid/lite/utils/factory.h b/paddle/fluid/lite/utils/factory.h new file mode 100644 index 0000000000000000000000000000000000000000..0d74d8b3a392f44230649deec9bc80fda2820cf6 --- /dev/null +++ b/paddle/fluid/lite/utils/factory.h @@ -0,0 +1,62 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include + +namespace paddle { +namespace lite { + +template +class Factory { + public: + using item_t = ItemType; + using self_t = Factory; + using item_ptr_t = std::unique_ptr; + using creator_t = std::function; + + static Factory& Global() { + static Factory* x = new self_t; + return *x; + } + + void Register(const std::string& op_type, creator_t&& creator) { + CHECK(!creators_.count(op_type)) << "The op " << op_type + << " has already registered"; + creators_.emplace(op_type, std::move(creator)); + } + + item_ptr_t Create(const std::string& op_type) const { + auto it = creators_.find(op_type); + CHECK(it != creators_.end()); + return it->second(); + } + + protected: + std::unordered_map creators_; +}; + +/* A helper function to help run a lambda at the start. + */ +template +class Registor { + public: + Registor(std::function&& functor) { functor(); } + + int Touch() { return 0; } +}; + +} // namespace lite +} // namespace paddle diff --git a/paddle/fluid/lite/utils/macros.h b/paddle/fluid/lite/utils/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..52ad44c7068ab29171a616c6f618d13d847cb998 --- /dev/null +++ b/paddle/fluid/lite/utils/macros.h @@ -0,0 +1,21 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
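A sketch of how the Factory/Registor pair defined in utils/factory.h above underpins the registration macros (illustrative only, not part of the patch; the template arguments of Factory and Registor are elided in the declarations above and assumed here, and the variable names are made up):

    // Roughly what REGISTER_LITE_OP(fc, ...) boils down to, spelled out by hand.
    static paddle::lite::Registor<paddle::lite::OpLite> fc_registor([] {
      paddle::lite::LiteOpRegistry::Global().Register(
          "fc", [] {
            return std::unique_ptr<paddle::lite::OpLite>(
                new paddle::lite::operators::FcOpLite);
          });
    });
    // Later, construct the op by name:
    auto fc_op = paddle::lite::LiteOpRegistry::Global().Create("fc");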
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#ifndef DISALLOW_COPY_AND_ASSIGN +#define DISALLOW_COPY_AND_ASSIGN(class__) \ + class__(const class__&) = delete; \ + class__& operator=(const class__&) = delete; +#endif diff --git a/paddle/fluid/lite/x86/CMakeLists.txt b/paddle/fluid/lite/x86/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..7cbf432c165f63556d34398e8caaa4ecd3002a6b --- /dev/null +++ b/paddle/fluid/lite/x86/CMakeLists.txt @@ -0,0 +1 @@ +cc_library(target_wrapper_x86 SRCS target_wrapper.cc) diff --git a/paddle/fluid/lite/x86/target_wrapper.cc b/paddle/fluid/lite/x86/target_wrapper.cc new file mode 100644 index 0000000000000000000000000000000000000000..533565787c24d3602cfaf59d800953611715678c --- /dev/null +++ b/paddle/fluid/lite/x86/target_wrapper.cc @@ -0,0 +1,33 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "target_wrapper.h" +#include + +namespace paddle { +namespace framework { +namespace lite { + +template <> +void TargetWrapper::MemcpySync(void* dst, void* src, size_t size, + IoDirection dir) { + std::copy_n(reinterpret_cast(src), size, + reinterpret_cast(dst)); +} + +template class TargetWrapper; + +} // namespace lite +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/lite/x86/target_wrapper.h b/paddle/fluid/lite/x86/target_wrapper.h new file mode 100644 index 0000000000000000000000000000000000000000..f105f4af83b76520e664e76ff567bbffa968c11f --- /dev/null +++ b/paddle/fluid/lite/x86/target_wrapper.h @@ -0,0 +1,24 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include "paddle/fluid/lite/target_wrapper.h" + +namespace paddle { +namespace framework { +namespace lite { +namespace x86 {} // namespace x86 +} // namespace lite +} // namespace framework +} // namespace paddle
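Taken together, the intended flow for a consumer of this patch appears to be: pull in the needed ops and kernels with the USE_* macros, create operators by name through LiteOpRegistry, then Build and Run them once those methods gain real implementations. A rough sketch (illustrative only; op_desc and scope are assumed to exist, and Build/Run are still stubs in this patch):

    // Hypothetical driver code, assuming a populated framework::OpDesc and Scope.
    auto op = paddle::lite::LiteOpRegistry::Global().Create("fc");
    // op->Build(op_desc, &scope);   // returns false for now: not yet implemented
    // op->InferShape();
    // op->Run();                    // FcOpLite::Run() is also a stub returning false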