From 6fbeafe07eb13e8407abb90611a9b5a99f2483f4 Mon Sep 17 00:00:00 2001
From: Chunwei
Date: Mon, 24 Jun 2019 08:14:01 +0000
Subject: [PATCH] add high level API

---
 paddle/fluid/lite/api/CMakeLists.txt     |  21 +++-
 paddle/fluid/lite/api/cxx_api.cc         |   2 +-
 paddle/fluid/lite/api/cxx_api.h          |   3 +-
 paddle/fluid/lite/api/cxx_api_impl.cc    |  87 ++++++++++++++
 paddle/fluid/lite/api/light_api_impl.cc  |  69 +++++++++++
 paddle/fluid/lite/api/paddle_api.cc      |  69 +++++++++++
 paddle/fluid/lite/api/paddle_api.h       | 110 ++++++++++++++++++
 paddle/fluid/lite/api/paddle_api_test.cc |  80 +++++++++++++
 paddle/fluid/lite/api/place.cc           |  93 +++++++++++++++
 paddle/fluid/lite/api/place.h            | 115 ++++++++++++++++++
 paddle/fluid/lite/core/CMakeLists.txt    |   2 +-
 paddle/fluid/lite/core/target_wrapper.cc |  28 +----
 paddle/fluid/lite/core/target_wrapper.h  | 141 +++--------------------
 paddle/fluid/lite/tools/build.sh         |   2 +-
 14 files changed, 662 insertions(+), 160 deletions(-)
 create mode 100644 paddle/fluid/lite/api/cxx_api_impl.cc
 create mode 100644 paddle/fluid/lite/api/light_api_impl.cc
 create mode 100644 paddle/fluid/lite/api/paddle_api.cc
 create mode 100644 paddle/fluid/lite/api/paddle_api.h
 create mode 100644 paddle/fluid/lite/api/paddle_api_test.cc
 create mode 100644 paddle/fluid/lite/api/place.cc
 create mode 100644 paddle/fluid/lite/api/place.h

diff --git a/paddle/fluid/lite/api/CMakeLists.txt b/paddle/fluid/lite/api/CMakeLists.txt
index d3d8c4900f5..68e5eaacc47 100644
--- a/paddle/fluid/lite/api/CMakeLists.txt
+++ b/paddle/fluid/lite/api/CMakeLists.txt
@@ -6,6 +6,8 @@ if(LITE_WITH_CUDA)
     nv_test(test_cxx_api_lite_cuda SRCS cxx_api_test.cc DEPS cxx_api_lite_cuda)
 endif()
 
+lite_cc_library(place_lite SRCS place.cc DEPS glog)
+
 lite_cc_library(lite_api_test_helper SRCS lite_api_test_helper.cc
     DEPS scope_lite optimizer_lite target_wrapper_host model_parser_lite program_lite
     ${ops_lite} ${host_kernels}
@@ -24,7 +26,13 @@ message(STATUS "get ops ${ops_lite}")
 message(STATUS "get Host kernels ${host_kernels}")
 message(STATUS "get ARM kernels ${arm_kernels}")
 
-lite_cc_library(cxx_api_lite SRCS cxx_api.cc DEPS ${cxx_api_lite_deps} ${ops_lite} ${host_kernels} program_lite)
+lite_cc_library(cxx_api_lite
+    SRCS cxx_api.cc
+    DEPS ${cxx_api_lite_deps} ${ops_lite} ${host_kernels} program_lite
+    X86_DEPS ${x86_kernels} operator
+    ARM_DEPS ${arm_kernels}
+    CL_DEPS ${opencl_kenrels}
+    )
 
 lite_cc_library(light_api_lite SRCS light_api.cc
     DEPS scope_lite target_wrapper_host model_parser_lite
@@ -32,6 +40,7 @@ lite_cc_library(light_api_lite SRCS light_api.cc
     CUDA_DEPS target_wrapper_cuda
     X86_DEPS ${x86_kernels} operator
     ARM_DEPS ${arm_kernels}
+    CL_DEPS ${opencl_kenrels}
     )
 
 include(ExternalProject)
@@ -91,6 +100,16 @@ lite_cc_test(test_apis_lite SRCS apis_test.cc
     ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model
          --optimized_model=${LITE_MODEL_DIR}/lite_naive_model_opt SERIAL)
 
+lite_cc_library(cxx_api_impl_lite SRCS cxx_api_impl.cc DEPS cxx_api_lite)
+lite_cc_library(light_api_impl_lite SRCS light_api_impl.cc DEPS light_api_lite)
+
+lite_cc_library(paddle_api_lite SRCS paddle_api.cc DEPS cxx_api_impl_lite light_api_impl_lite)
+lite_cc_test(test_paddle_api_lite SRCS paddle_api_test.cc DEPS cxx_api_lite light_api_lite paddle_api_lite
+    ARGS --model_dir=${LITE_MODEL_DIR}/lite_naive_model SERIAL)
+if (WITH_TESTING)
+    add_dependencies(test_paddle_api_lite test_apis_lite)
+endif()
+
 #lite_cc_binary(cxx_api_lite_bin SRCS cxx_api_bin.cc
         #X86_DEPS operator
         #DEPS light_api_lite model_parser_lite target_wrapper_host mir_passes
diff --git a/paddle/fluid/lite/api/cxx_api.cc b/paddle/fluid/lite/api/cxx_api.cc
index 4db6f99aeba..0cb6c12dbcf 100644
--- a/paddle/fluid/lite/api/cxx_api.cc
+++ b/paddle/fluid/lite/api/cxx_api.cc
@@ -38,7 +38,7 @@ lite::Tensor *Predictor::GetInput(size_t offset) {
   return &feed_list->at(offset);
 }
 
-const lite::Tensor *Predictor::GetOutput(size_t offset) {
+const lite::Tensor *Predictor::GetOutput(size_t offset) const {
   auto *_fetch_list = program_->exec_scope()->FindVar("fetch");
   CHECK(_fetch_list) << "no fatch variable in exec_scope";
   auto &fetch_list = *_fetch_list->GetMutable<std::vector<lite::Tensor>>();
diff --git a/paddle/fluid/lite/api/cxx_api.h b/paddle/fluid/lite/api/cxx_api.h
index da728b2dceb..e7bed3f9031 100644
--- a/paddle/fluid/lite/api/cxx_api.h
+++ b/paddle/fluid/lite/api/cxx_api.h
@@ -17,6 +17,7 @@
 #include <memory>
 #include <string>
 #include <vector>
+#include "paddle/fluid/lite/api/paddle_api.h"
 #include "paddle/fluid/lite/core/op_lite.h"
 #include "paddle/fluid/lite/core/optimizer.h"
 #include "paddle/fluid/lite/core/program.h"
@@ -53,7 +54,7 @@ class Predictor {
   lite::Tensor* GetInput(size_t offset);
 
   // Get offset-th col of fetch results.
-  const lite::Tensor* GetOutput(size_t offset);
+  const lite::Tensor* GetOutput(size_t offset) const;
 
   const framework::proto::ProgramDesc& program_desc() const;
   const lite::Tensor* GetTensor(const std::string& name) const;
diff --git a/paddle/fluid/lite/api/cxx_api_impl.cc b/paddle/fluid/lite/api/cxx_api_impl.cc
new file mode 100644
index 00000000000..83a9f99965f
--- /dev/null
+++ b/paddle/fluid/lite/api/cxx_api_impl.cc
@@ -0,0 +1,87 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/api/cxx_api.h"
+#include "paddle/fluid/lite/api/paddle_api.h"
+
+namespace paddle {
+namespace lite {
+
+class CxxPaddleApiImpl : public lite_api::PaddlePredictor {
+ public:
+  CxxPaddleApiImpl();
+
+  /// Create a new predictor from a config.
+  void Init(const lite_api::CxxConfig &config);
+
+  std::unique_ptr<lite_api::Tensor> GetInput(int i) override;
+
+  std::unique_ptr<const lite_api::Tensor> GetOutput(int i) const override;
+
+  void Run() override;
+
+  std::unique_ptr<const lite_api::Tensor> GetTensor(
+      const std::string &name) const override;
+
+  void SaveOptimizedModel(const std::string &model_dir) override;
+
+ private:
+  Predictor raw_predictor_;
+};
+
+CxxPaddleApiImpl::CxxPaddleApiImpl() {}
+
+void CxxPaddleApiImpl::Init(const lite_api::CxxConfig &config) {
+  auto places = config.valid_places();
+  places.emplace_back(TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny));
+  raw_predictor_.Build(config.model_dir(), config.preferred_place(), places);
+}
+
+std::unique_ptr<lite_api::Tensor> CxxPaddleApiImpl::GetInput(int i) {
+  auto *x = raw_predictor_.GetInput(i);
+  return std::unique_ptr<lite_api::Tensor>(new lite_api::Tensor(x));
+}
+
+std::unique_ptr<const lite_api::Tensor> CxxPaddleApiImpl::GetOutput(
+    int i) const {
+  const auto *x = raw_predictor_.GetOutput(i);
+  return std::unique_ptr<const lite_api::Tensor>(new lite_api::Tensor(x));
+}
+
+void CxxPaddleApiImpl::Run() { raw_predictor_.Run(); }
+
+std::unique_ptr<const lite_api::Tensor> CxxPaddleApiImpl::GetTensor(
+    const std::string &name) const {
+  auto *x = raw_predictor_.GetTensor(name);
+  return std::unique_ptr<const lite_api::Tensor>(new lite_api::Tensor(x));
+}
+
+void CxxPaddleApiImpl::SaveOptimizedModel(const std::string &model_dir) {
+  raw_predictor_.SaveModel(model_dir);
+}
+
+}  // namespace lite
+
+namespace lite_api {
+
+template <>
+std::shared_ptr<PaddlePredictor> CreatePaddlePredictor(
+    const CxxConfig &config) {
+  auto x = std::make_shared<lite::CxxPaddleApiImpl>();
+  x->Init(config);
+  return x;
+}
+
+}  // namespace lite_api
+}  // namespace paddle
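The template specialization at the end of cxx_api_impl.cc above is the dispatch mechanism for the whole high-level API: CreatePaddlePredictor is declared once, generically, and each config type supplies its own explicit specialization. The pattern in isolation, as a self-contained sketch (the names ConfigA, Predictor, Create are illustrative, not part of this patch):

    #include <iostream>
    #include <memory>

    struct ConfigA {};

    struct Predictor {
      virtual void Run() = 0;
      virtual ~Predictor() = default;
    };

    // Declared generically; only explicit specializations are ever defined,
    // so an unsupported config type fails at link time.
    template <typename ConfigT>
    std::shared_ptr<Predictor> Create(const ConfigT&);

    struct PredictorA : Predictor {
      void Run() override { std::cout << "PredictorA::Run\n"; }
    };

    template <>
    std::shared_ptr<Predictor> Create(const ConfigA&) {
      return std::make_shared<PredictorA>();
    }

    int main() {
      auto p = Create(ConfigA{});  // deduces ConfigT = ConfigA
      p->Run();
      return 0;
    }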
diff --git a/paddle/fluid/lite/api/light_api_impl.cc b/paddle/fluid/lite/api/light_api_impl.cc
new file mode 100644
index 00000000000..3f69f355736
--- /dev/null
+++ b/paddle/fluid/lite/api/light_api_impl.cc
@@ -0,0 +1,69 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/api/light_api.h"
+#include "paddle/fluid/lite/api/paddle_api.h"
+
+namespace paddle {
+namespace lite_api {
+
+class LightPredictorImpl : public PaddlePredictor {
+ public:
+  LightPredictorImpl() = default;
+
+  std::unique_ptr<Tensor> GetInput(int i) override;
+
+  std::unique_ptr<const Tensor> GetOutput(int i) const override;
+
+  void Run() override;
+
+  std::unique_ptr<const Tensor> GetTensor(
+      const std::string& name) const override;
+
+  void Init(const MobileConfig& config);
+
+ private:
+  std::unique_ptr<lite::LightPredictor> raw_predictor_;
+};
+
+void LightPredictorImpl::Init(const MobileConfig& config) {
+  raw_predictor_.reset(new lite::LightPredictor(config.model_dir()));
+}
+
+std::unique_ptr<Tensor> LightPredictorImpl::GetInput(int i) {
+  return std::unique_ptr<Tensor>(new Tensor(raw_predictor_->GetInput(i)));
+}
+
+std::unique_ptr<const Tensor> LightPredictorImpl::GetOutput(int i) const {
+  return std::unique_ptr<const Tensor>(
+      new Tensor(raw_predictor_->GetOutput(i)));
+}
+
+void LightPredictorImpl::Run() { raw_predictor_->Run(); }
+
+std::unique_ptr<const Tensor> LightPredictorImpl::GetTensor(
+    const std::string& name) const {
+  return std::unique_ptr<const Tensor>(
+      new Tensor(raw_predictor_->GetTensor(name)));
+}
+
+template <>
+std::shared_ptr<PaddlePredictor> CreatePaddlePredictor(
+    const MobileConfig& config) {
+  auto x = std::make_shared<LightPredictorImpl>();
+  x->Init(config);
+  return x;
+}
+
+}  // namespace lite_api
+}  // namespace paddle
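Taken together with cxx_api_impl.cc above, the intended two-phase flow is: optimize and persist the model with the full predictor offline, then load the persisted model with the light predictor on device. A minimal sketch against the new API (the model paths are hypothetical):

    #include "paddle/fluid/lite/api/paddle_api.h"

    using namespace paddle::lite_api;  // NOLINT

    void Deploy() {
      // Offline phase: the full predictor runs the optimization passes and
      // persists the result via SaveOptimizedModel.
      CxxConfig cxx_config;
      cxx_config.set_model_dir("/path/to/model");  // hypothetical path
      cxx_config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)});
      cxx_config.set_valid_places({Place{TARGET(kX86), PRECISION(kFloat)}});
      auto full = CreatePaddlePredictor(cxx_config);
      full->SaveOptimizedModel("/path/to/model.opt");  // hypothetical path

      // On-device phase: the light predictor loads the optimized model and
      // skips IR optimization entirely.
      MobileConfig mobile_config;
      mobile_config.set_model_dir("/path/to/model.opt");
      auto light = CreatePaddlePredictor(mobile_config);
      light->Run();
    }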
+ +#include "paddle/fluid/lite/api/light_api.h" +#include "paddle/fluid/lite/api/paddle_api.h" + +namespace paddle { +namespace lite_api { + +class LightPredictorImpl : public PaddlePredictor { + public: + LightPredictorImpl() = default; + + std::unique_ptr GetInput(int i) override; + + std::unique_ptr GetOutput(int i) const override; + + void Run() override; + + std::unique_ptr GetTensor( + const std::string& name) const override; + + void Init(const MobileConfig& config); + + private: + std::unique_ptr raw_predictor_; +}; + +void LightPredictorImpl::Init(const MobileConfig& config) { + raw_predictor_.reset(new lite::LightPredictor(config.model_dir())); +} + +std::unique_ptr LightPredictorImpl::GetInput(int i) { + return std::unique_ptr(new Tensor(raw_predictor_->GetInput(i))); +} + +std::unique_ptr LightPredictorImpl::GetOutput(int i) const { + return std::unique_ptr(new Tensor(raw_predictor_->GetOutput(i))); +} + +void LightPredictorImpl::Run() { raw_predictor_->Run(); } + +std::unique_ptr LightPredictorImpl::GetTensor( + const std::string& name) const { + return std::unique_ptr( + new Tensor(raw_predictor_->GetTensor(name))); +} + +template <> +std::shared_ptr CreatePaddlePredictor( + const MobileConfig& config) { + auto x = std::make_shared(); + x->Init(config); + return x; +} + +} // namespace lite_api +} // namespace paddle diff --git a/paddle/fluid/lite/api/paddle_api.cc b/paddle/fluid/lite/api/paddle_api.cc new file mode 100644 index 00000000000..af78d35b691 --- /dev/null +++ b/paddle/fluid/lite/api/paddle_api.cc @@ -0,0 +1,69 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/lite/api/paddle_api.h" +#include "paddle/fluid/lite/api/cxx_api.h" +#include "paddle/fluid/lite/api/light_api.h" + +namespace paddle { +namespace lite_api { + +Tensor::Tensor(void *raw) : raw_tensor_(raw) {} + +// TODO(Superjomn) refine this by using another `const void* const_raw`; +Tensor::Tensor(const void *raw) { raw_tensor_ = const_cast(raw); } + +lite::Tensor *tensor(void *x) { return static_cast(x); } +const lite::Tensor *ctensor(void *x) { + return static_cast(x); +} + +void Tensor::Resize(const shape_t &shape) { + tensor(raw_tensor_)->Resize(shape); +} + +template <> +const float *Tensor::data() const { + return ctensor(raw_tensor_)->data(); +} +template <> +const int8_t *Tensor::data() const { + return ctensor(raw_tensor_)->data(); +} + +template <> +float *Tensor::mutable_data() const { + return tensor(raw_tensor_)->mutable_data(); +} +template <> +int8_t *Tensor::mutable_data() const { + return tensor(raw_tensor_)->mutable_data(); +} + +shape_t Tensor::shape() const { + return ctensor(raw_tensor_)->dims().Vectorize(); +} + +void PaddlePredictor::SaveOptimizedModel(const std::string &model_dir) { + LOG(ERROR) + << "The SaveOptimizedModel API is only supported by CxxConfig predictor."; +} + +template +std::shared_ptr CreatePaddlePredictor(const ConfigT &) { + return std::shared_ptr(); +} + +} // namespace lite_api +} // namespace paddle diff --git a/paddle/fluid/lite/api/paddle_api.h b/paddle/fluid/lite/api/paddle_api.h new file mode 100644 index 00000000000..90685039950 --- /dev/null +++ b/paddle/fluid/lite/api/paddle_api.h @@ -0,0 +1,110 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + * This file defines PaddlePredictor, the api for lite. It supports multiple + * hardware including ARM, X86, OpenCL, CUDA and so on. + */ + +#ifndef PADDLE_LITE_API_H_ // NOLINT +#define PADDLE_LITE_API_H_ +#include +#include +#include +#include "place.h" // NOLINT + +namespace paddle { +namespace lite_api { + +using shape_t = std::vector; + +struct Tensor { + explicit Tensor(void* raw); + explicit Tensor(const void* raw); + + void Resize(const shape_t& shape); + + /// Readonly data. + template + const T* data() const; + + template + T* mutable_data() const; + + /// Shape of the tensor. + shape_t shape() const; + + private: + void* raw_tensor_; +}; + +/// The PaddlePredictor defines the basic interfaces for different kinds of +/// predictors. +class PaddlePredictor { + public: + PaddlePredictor() = default; + + /// Get i-th input. + virtual std::unique_ptr GetInput(int i) = 0; + + /// Get i-th output. + virtual std::unique_ptr GetOutput(int i) const = 0; + + virtual void Run() = 0; + + /// Get a readonly tensor, return null if no one called `name` exists. + virtual std::unique_ptr GetTensor( + const std::string& name) const = 0; + + /// Persist the optimized model to disk. This API is only supported by + /// CxxConfig, and the persisted model can be reused for MobileConfig. 
diff --git a/paddle/fluid/lite/api/paddle_api_test.cc b/paddle/fluid/lite/api/paddle_api_test.cc
new file mode 100644
index 00000000000..f9f3b1e7c40
--- /dev/null
+++ b/paddle/fluid/lite/api/paddle_api_test.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/lite/api/paddle_api.h"
+#include <gflags/gflags.h>
+#include <gtest/gtest.h>
+#include "paddle/fluid/lite/core/mir/use_passes.h"
+#include "paddle/fluid/lite/kernels/use_kernels.h"
+#include "paddle/fluid/lite/operators/use_ops.h"
+
+DEFINE_string(model_dir, "", "");
+
+namespace paddle {
+namespace lite_api {
+
+TEST(CxxApi, run) {
+  lite_api::CxxConfig config;
+  config.set_model_dir(FLAGS_model_dir);
+  config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)});
+  config.set_valid_places({Place{TARGET(kX86), PRECISION(kFloat)}});
+
+  auto predictor = lite_api::CreatePaddlePredictor(config);
+
+  auto input_tensor = predictor->GetInput(0);
+  input_tensor->Resize(std::vector<int64_t>({100, 100}));
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < 100 * 100; i++) {
+    data[i] = i;
+  }
+
+  predictor->Run();
+
+  auto output = predictor->GetOutput(0);
+  auto* out = output->data<float>();
+  LOG(INFO) << out[0];
+  LOG(INFO) << out[1];
+
+  EXPECT_NEAR(out[0], 50.2132, 1e-3);
+  EXPECT_NEAR(out[1], -28.8729, 1e-3);
+
+  predictor->SaveOptimizedModel(FLAGS_model_dir + ".opt2");
+}
+
+TEST(LightApi, run) {
+  lite_api::MobileConfig config;
+  config.set_model_dir(FLAGS_model_dir + ".opt2");
+
+  auto predictor = lite_api::CreatePaddlePredictor(config);
+
+  auto input_tensor = predictor->GetInput(0);
+  input_tensor->Resize(std::vector<int64_t>({100, 100}));
+  auto* data = input_tensor->mutable_data<float>();
+  for (int i = 0; i < 100 * 100; i++) {
+    data[i] = i;
+  }
+
+  predictor->Run();
+
+  auto output = predictor->GetOutput(0);
+  auto* out = output->data<float>();
+  LOG(INFO) << out[0];
+  LOG(INFO) << out[1];
+
+  EXPECT_NEAR(out[0], 50.2132, 1e-3);
+  EXPECT_NEAR(out[1], -28.8729, 1e-3);
+}
+
+}  // namespace lite_api
+}  // namespace paddle
+ +#include "paddle/fluid/lite/api/paddle_api.h" +#include +#include +#include "paddle/fluid/lite/core/mir/use_passes.h" +#include "paddle/fluid/lite/kernels/use_kernels.h" +#include "paddle/fluid/lite/operators/use_ops.h" + +DEFINE_string(model_dir, "", ""); + +namespace paddle { +namespace lite_api { + +TEST(CxxApi, run) { + lite_api::CxxConfig config; + config.set_model_dir(FLAGS_model_dir); + config.set_preferred_place(Place{TARGET(kX86), PRECISION(kFloat)}); + config.set_valid_places({Place{TARGET(kX86), PRECISION(kFloat)}}); + + auto predictor = lite_api::CreatePaddlePredictor(config); + + auto input_tensor = predictor->GetInput(0); + input_tensor->Resize(std::vector({100, 100})); + auto* data = input_tensor->mutable_data(); + for (int i = 0; i < 100 * 100; i++) { + data[i] = i; + } + + predictor->Run(); + + auto output = predictor->GetOutput(0); + auto* out = output->data(); + LOG(INFO) << out[0]; + LOG(INFO) << out[1]; + + EXPECT_NEAR(out[0], 50.2132, 1e-3); + EXPECT_NEAR(out[1], -28.8729, 1e-3); + + predictor->SaveOptimizedModel(FLAGS_model_dir + ".opt2"); +} + +TEST(LightApi, run) { + lite_api::MobileConfig config; + config.set_model_dir(FLAGS_model_dir + ".opt2"); + + auto predictor = lite_api::CreatePaddlePredictor(config); + + auto input_tensor = predictor->GetInput(0); + input_tensor->Resize(std::vector({100, 100})); + auto* data = input_tensor->mutable_data(); + for (int i = 0; i < 100 * 100; i++) { + data[i] = i; + } + + predictor->Run(); + + auto output = predictor->GetOutput(0); + auto* out = output->data(); + LOG(INFO) << out[0]; + LOG(INFO) << out[1]; + + EXPECT_NEAR(out[0], 50.2132, 1e-3); + EXPECT_NEAR(out[1], -28.8729, 1e-3); +} + +} // namespace lite_api +} // namespace paddle diff --git a/paddle/fluid/lite/api/place.cc b/paddle/fluid/lite/api/place.cc new file mode 100644 index 00000000000..6eee4cba65e --- /dev/null +++ b/paddle/fluid/lite/api/place.cc @@ -0,0 +1,93 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/lite/api/place.h" +#include +#include "paddle/fluid/lite/utils/hash.h" + +namespace paddle { +namespace lite_api { + +size_t Place::hash() const { + std::hash h; + size_t hash = h(static_cast(target)); + hash = lite::hash_combine(hash, static_cast(precision)); + hash = lite::hash_combine(hash, static_cast(layout)); + hash = lite::hash_combine(hash, static_cast(device)); + return hash; +} + +bool operator<(const Place& a, const Place& b) { + if (a.target != b.target) return a.target < b.target; + if (a.precision != b.precision) return a.precision < b.precision; + if (a.layout != b.layout) return a.layout < b.layout; + if (a.device != b.device) return a.device < b.device; + return false; +} + +std::string Place::DebugString() const { + std::stringstream os; + os << TargetToStr(target) << "/" << PrecisionToStr(precision) << "/" + << DataLayoutToStr(layout); + return os.str(); +} + +const std::string& TargetToStr(TargetType target) { + static const std::string target2string[] = {"unk", "host", "x86", "cuda", + "arm", "opencl", "any"}; + auto x = static_cast(target); + CHECK_LT(x, static_cast(TARGET(NUM))); + return target2string[x]; +} + +const std::string& PrecisionToStr(PrecisionType precision) { + static const std::string precision2string[] = {"unk", "float", "int8_t", + "any"}; + auto x = static_cast(precision); + CHECK_LT(x, static_cast(PRECISION(NUM))); + return precision2string[x]; +} + +const std::string& DataLayoutToStr(DataLayoutType layout) { + static const std::string datalayout2string[] = {"unk", "NCHW", "any"}; + auto x = static_cast(layout); + CHECK_LT(x, static_cast(DATALAYOUT(NUM))); + return datalayout2string[x]; +} + +const std::string& TargetRepr(TargetType target) { + static const std::string target2string[] = { + "kUnk", "kHost", "kX86", "kCUDA", "kARM", "kOpenCL", "kAny"}; + auto x = static_cast(target); + CHECK_LT(x, static_cast(TARGET(NUM))); + return target2string[x]; +} + +const std::string& PrecisionRepr(PrecisionType precision) { + static const std::string precision2string[] = {"kUnk", "kFloat", "kInt8", + "kInt32", "kAny"}; + auto x = static_cast(precision); + CHECK_LT(x, static_cast(PRECISION(NUM))); + return precision2string[x]; +} + +const std::string& DataLayoutRepr(DataLayoutType layout) { + static const std::string datalayout2string[] = {"kUnk", "kNCHW", "kAny"}; + auto x = static_cast(layout); + CHECK_LT(x, static_cast(DATALAYOUT(NUM))); + return datalayout2string[x]; +} + +} // namespace lite_api +} // namespace paddle diff --git a/paddle/fluid/lite/api/place.h b/paddle/fluid/lite/api/place.h new file mode 100644 index 00000000000..6b0657af198 --- /dev/null +++ b/paddle/fluid/lite/api/place.h @@ -0,0 +1,115 @@ +// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
diff --git a/paddle/fluid/lite/core/CMakeLists.txt b/paddle/fluid/lite/core/CMakeLists.txt
index f6d48c2bea5..33866fa5bcc 100644
--- a/paddle/fluid/lite/core/CMakeLists.txt
+++ b/paddle/fluid/lite/core/CMakeLists.txt
@@ -2,7 +2,7 @@ if (WITH_TESTING)
     cc_library(lite_gtest_main SRCS lite_gtest_main.cc DEPS gtest gflags)
 endif()
 lite_cc_library(target_wrapper_lite SRCS target_wrapper.cc
-    DEPS target_wrapper_host
+    DEPS target_wrapper_host place_lite
     X86_DEPS target_wrapper_x86
     CUDA_DEPS target_wrapper_cuda)
 lite_cc_library(memory_lite SRCS memory.cc DEPS target_wrapper_lite)
diff --git a/paddle/fluid/lite/core/target_wrapper.cc b/paddle/fluid/lite/core/target_wrapper.cc
index 5ce5f3942d8..ae833d22115 100644
--- a/paddle/fluid/lite/core/target_wrapper.cc
+++ b/paddle/fluid/lite/core/target_wrapper.cc
@@ -17,31 +17,5 @@
 #include "paddle/fluid/lite/utils/all.h"
 
 namespace paddle {
-namespace lite {
-
-size_t Place::hash() const {
-  std::hash<int> h;
-  size_t hash = h(static_cast<int>(target));
-  hash = hash_combine(hash, static_cast<int>(precision));
-  hash = hash_combine(hash, static_cast<int>(layout));
-  hash = hash_combine(hash, static_cast<int>(device));
-  return hash;
-}
-
-bool operator<(const Place &a, const Place &b) {
-  if (a.target != b.target) return a.target < b.target;
-  if (a.precision != b.precision) return a.precision < b.precision;
-  if (a.layout != b.layout) return a.layout < b.layout;
-  if (a.device != b.device) return a.device < b.device;
-  return true;
-}
-
-std::string Place::DebugString() const {
-  std::stringstream os;
-  os << TargetToStr(target) << "/" << PrecisionToStr(precision) << "/"
-     << DataLayoutToStr(layout);
-  return os.str();
-}
-
-}  // namespace lite
+namespace lite {}  // namespace lite
 }  // namespace paddle
diff --git a/paddle/fluid/lite/core/target_wrapper.h b/paddle/fluid/lite/core/target_wrapper.h
index 1d395b56904..0e9d260e55b 100644
--- a/paddle/fluid/lite/core/target_wrapper.h
+++ b/paddle/fluid/lite/core/target_wrapper.h
@@ -16,7 +16,9 @@
 #include <iostream>
 #include <sstream>
 #include <string>
+#include "paddle/fluid/lite/api/place.h"
 #include "paddle/fluid/lite/utils/cp_logging.h"
+
 #ifdef LITE_WITH_CUDA
 #include <cuda.h>
 #include <cuda_runtime.h>
@@ -25,134 +27,17 @@
 namespace paddle {
 namespace lite {
 
-enum class TargetType : int {
-  kUnk = 0,
-  kHost,
-  kX86,
-  kCUDA,
-  kARM,
-  kOpenCL,
-  kAny,  // any target
-  NUM,   // number of fields.
-};
-enum class PrecisionType : int {
-  kUnk = 0,
-  kFloat,
-  kInt8,
-  kInt32,
-  kAny,  // any precision
-  NUM,   // number of fields.
-};
-enum class DataLayoutType : int {
-  kUnk = 0,
-  kNCHW,
-  kAny,  // any data layout
-  NUM,   // number of fields.
-};
-
-static size_t PrecisionTypeLength(PrecisionType type) {
-  switch (type) {
-    case PrecisionType::kFloat:
-      return 4;
-    case PrecisionType::kInt8:
-      return 1;
-    case PrecisionType::kInt32:
-      return 4;
-    default:
-      return 4;
-  }
-}
-
-// Some helper macro to get a specific TargetType.
-#define TARGET(item__) paddle::lite::TargetType::item__
-// Some helper macro to get a specific PrecisionType.
-#define PRECISION(item__) paddle::lite::PrecisionType::item__
-#define DATALAYOUT(item__) paddle::lite::DataLayoutType::item__
-
-static const std::string& TargetToStr(TargetType target) {
-  static const std::string target2string[] = {"unk", "host", "x86", "cuda",
-                                              "arm", "opencl", "any"};
-  auto x = static_cast<int>(target);
-  CHECK_LT(x, static_cast<int>(TARGET(NUM)));
-  return target2string[x];
-}
-
-static const std::string& PrecisionToStr(PrecisionType precision) {
-  static const std::string precision2string[] = {"unk", "float", "int8_t",
-                                                 "any"};
-  auto x = static_cast<int>(precision);
-  CHECK_LT(x, static_cast<int>(PRECISION(NUM)));
-  return precision2string[x];
-}
-
-static const std::string& DataLayoutToStr(DataLayoutType layout) {
-  static const std::string datalayout2string[] = {"unk", "NCHW", "any"};
-  auto x = static_cast<int>(layout);
-  CHECK_LT(x, static_cast<int>(DATALAYOUT(NUM)));
-  return datalayout2string[x];
-}
-
-static const std::string& TargetRepr(TargetType target) {
-  static const std::string target2string[] = {
-      "kUnk", "kHost", "kX86", "kCUDA", "kARM", "kOpenCL", "kAny"};
-  auto x = static_cast<int>(target);
-  CHECK_LT(x, static_cast<int>(TARGET(NUM)));
-  return target2string[x];
-}
-
-static const std::string& PrecisionRepr(PrecisionType precision) {
-  static const std::string precision2string[] = {"kUnk", "kFloat", "kInt8",
-                                                 "kInt32", "kAny"};
-  auto x = static_cast<int>(precision);
-  CHECK_LT(x, static_cast<int>(PRECISION(NUM)));
-  return precision2string[x];
-}
-
-static const std::string& DataLayoutRepr(DataLayoutType layout) {
-  static const std::string datalayout2string[] = {"kUnk", "kNCHW", "kAny"};
-  auto x = static_cast<int>(layout);
-  CHECK_LT(x, static_cast<int>(DATALAYOUT(NUM)));
-  return datalayout2string[x];
-}
-
-/*
- * Place specifies the execution context of a Kernel or input/output for a
- * kernel. It is used to make the analysis of the MIR more clear and accurate.
- */
-struct Place {
-  TargetType target{TARGET(kUnk)};
-  PrecisionType precision{PRECISION(kUnk)};
-  DataLayoutType layout{DATALAYOUT(kUnk)};
-  int16_t device{0};  // device ID
-
-  Place() = default;
-  Place(TargetType target, PrecisionType precision,
-        DataLayoutType layout = DATALAYOUT(kNCHW), int16_t device = 0)
-      : target(target), precision(precision), layout(layout), device(device) {}
-
-  bool is_valid() const {
-    return target != TARGET(kUnk) && precision != PRECISION(kUnk) &&
-           layout != DATALAYOUT(kUnk);
-  }
-
-  size_t hash() const;
-
-  bool operator==(const Place& other) const {
-    return target == other.target && precision == other.precision &&
-           layout == other.layout && device == other.device;
-  }
-
-  bool operator!=(const Place& other) const { return !(*this == other); }
-
-  friend bool operator<(const Place& a, const Place& b);
-
-  friend std::ostream& operator<<(std::ostream& os, const Place& other) {
-    os << other.DebugString();
-    return os;
-  }
-
-  std::string DebugString() const;
-};
+using lite_api::TargetType;
+using lite_api::PrecisionType;
+using lite_api::DataLayoutType;
+using lite_api::PrecisionTypeLength;
+using lite_api::TargetToStr;
+using lite_api::Place;
+using lite_api::PrecisionToStr;
+using lite_api::DataLayoutToStr;
+using lite_api::TargetRepr;
+using lite_api::PrecisionRepr;
+using lite_api::DataLayoutRepr;
 
 // Memory copy directions.
 enum class IoDirection {
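The using-declarations above re-export every moved type back into namespace lite, so existing code that spells lite::Place or lite::TargetType keeps compiling without edits. The mechanism in isolation (illustrative names only):

    #include <type_traits>

    namespace api {
    struct Place {};
    }  // namespace api

    namespace core {
    using api::Place;  // re-export: core::Place names the same type
    }  // namespace core

    static_assert(std::is_same<core::Place, api::Place>::value,
                  "a using-declaration aliases the type, it does not copy it");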
diff --git a/paddle/fluid/lite/tools/build.sh b/paddle/fluid/lite/tools/build.sh
index 8ecc02e0874..a88e7786c01 100755
--- a/paddle/fluid/lite/tools/build.sh
+++ b/paddle/fluid/lite/tools/build.sh
@@ -123,7 +123,7 @@ function test_arm_android {
     echo "test name: ${test_name}"
     adb_work_dir="/data/local/tmp"
 
-    skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api_lite" "test_apis_lite")
+    skip_list=("test_model_parser_lite" "test_mobilenetv1_lite" "test_mobilenetv2_lite" "test_resnet50_lite" "test_inceptionv4_lite" "test_light_api_lite" "test_apis_lite" "test_paddle_api_lite")
     for skip_name in ${skip_list[@]} ; do
         [[ $skip_name =~ (^|[[:space:]])$test_name($|[[:space:]]) ]] && echo "skip $test_name" && return
     done
-- 
GitLab