// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include <vector>

#include "paddle/fluid/lite/core/lite_tensor.h"
#include "paddle/fluid/lite/utils/cp_logging.h"

namespace paddle {
namespace lite {

#ifdef LITE_WITH_ARM

typedef enum {
  LITE_POWER_HIGH = 0,
  LITE_POWER_LOW = 1,
  LITE_POWER_FULL = 2,
  LITE_POWER_NO_BIND = 3,
  LITE_POWER_RAND_HIGH = 4,
  LITE_POWER_RAND_LOW = 5
} PowerMode;

typedef enum {
  kAPPLE = 0,
  kA53 = 53,
  kA55 = 55,
  kA57 = 57,
  kA72 = 72,
  kA73 = 73,
  kA75 = 75,
  kA76 = 76,
  kARMArch_UNKOWN = -1
} ARMArch;

class DeviceInfo {
 public:
  int idx_;
  int max_freq_;
  int min_freq_;
  int generate_arch_;
  int compute_core_num_;
  int max_memory_;
  int sharemem_size_;
  std::string device_name_;
  std::string compute_ability_;
  std::vector<int> L1_cache_;
  std::vector<int> L2_cache_;
  std::vector<int> L3_cache_;
  std::vector<int> core_ids_;
  std::vector<int> big_core_ids_;
  std::vector<int> little_core_ids_;
  std::vector<int> cluster_ids_;
  std::vector<ARMArch> archs_;

  ARMArch arch_;
  // LITE_POWER_HIGH stands for using big cores,
  // LITE_POWER_LOW stands for using small cores,
  // LITE_POWER_FULL stands for using all cores
  PowerMode mode_;
  std::vector<int> active_ids_;
  TensorLite workspace_;
  int64_t count_{0};

  // Returns the process-wide singleton.
  static DeviceInfo& Global() {
    static auto* x = new DeviceInfo;
    return *x;
  }

  // Probes the host CPU and fills in the singleton's fields.
  static void Init() {
    auto& info = Global();
    InitInternal(&info);
  }

  void SetRunMode(PowerMode mode, int threads);
  void SetCache(int l1size, int l2size, int l3size);
  void SetArch(ARMArch arch) { arch_ = arch; }
  void BindDev();

  PowerMode mode() const { return mode_; }
  int threads() const { return active_ids_.size(); }
  ARMArch arch() const { return arch_; }

  template <typename T>
  T* workspace_data() {
    return workspace_.mutable_data<T>();
  }

  int l1_cache_size() const { return L1_cache_[active_ids_[0]]; }
  int l2_cache_size() const { return L2_cache_[active_ids_[0]]; }
  int l3_cache_size() const { return L3_cache_[active_ids_[0]]; }

  bool ExtendWorkspace(DDimLite dims);

 private:
  DeviceInfo() = default;
  static void InitInternal(DeviceInfo* dev);
};

size_t arm_get_meminfo();

int arm_get_cpucount();

void arm_get_cpu_arch(std::vector<ARMArch>* archs);

bool get_cpu_info_from_name(DeviceInfo* cpu_info, std::string hardware_name);

#ifdef LITE_WITH_LINUX

void set_default_cache(DeviceInfo* dev);

std::string arm_get_cpu_name();

int get_max_freq_khz(int cpuid);

int arm_sort_cpuid_by_max_frequency(int cpu_count, std::vector<int>* cpuids,
                                    const std::vector<int>& cpu_freq,
                                    std::vector<int>* cluster_ids);

int check_online(const std::vector<int>& core_ids);

int set_sched_affinity(const std::vector<int>& cpuids);

#endif  // LITE_WITH_LINUX

#endif  // LITE_WITH_ARM

}  // namespace lite
}  // namespace paddle
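
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of this header's contract). It
// assumes an ARM build (LITE_WITH_ARM) and shows how a caller might configure
// DeviceInfo before running kernels; everything other than the declarations
// above is hypothetical.
//
//   paddle::lite::DeviceInfo::Init();                    // probe CPU topology once
//   auto& dev = paddle::lite::DeviceInfo::Global();      // process-wide singleton
//   dev.SetRunMode(paddle::lite::LITE_POWER_HIGH, 2);    // 2 threads on big cores
//   int l2 = dev.l2_cache_size();                        // cache size of the active core
//   // Scratch memory for kernels; typically grown via ExtendWorkspace() first.
//   float* scratch = dev.workspace_data<float>();
// ---------------------------------------------------------------------------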