Commit 6139494f authored by H hangq

add doc for include file & clang-format include file

Parent 60362ce7
......@@ -22,57 +22,53 @@
#include "include/ms_tensor.h"
namespace mindspore::lite {
// brief Allocator defined by MindSpore Lite
//
// note List public class and interface for reference
/// \brief Allocator defined by MindSpore Lite.
///
/// \note Listed here as a public class for reference.
class Allocator;
/// \brief CpuBindMode defined by MindSpore Lite.
enum CpuBindMode {
MID_CPU = -1, /**< bind mid cpu first */
HIGHER_CPU = 1, /**< bind higher cpu first */
NO_BIND = 0 /**< no bind */
};
typedef enum { DT_CPU, DT_GPU, DT_NPU } DeviceType;
/// \brief DeviceType defined by MindSpore Lite.
typedef enum {
DT_CPU, /**< CPU device type */
DT_GPU, /**< GPU device type */
DT_NPU /**< NPU device type */
} DeviceType;
// brief NPUContext defined by MindSpore Lite
/// \brief DeviceContext defined by MindSpore Lite.
typedef struct {
int freq{3};
int fmkType{0};
int modelType{0};
int deviceType{0};
std::string modelName = "default";
} NPUContext;
// brief DeviceContext defined by MindSpore Lite
typedef struct {
DeviceType type;
NPUContext npuCtx;
DeviceType type; /**< device type */
} DeviceContext;
// brief Context defined by MindSpore Lite
/// \brief Context defined by MindSpore Lite.
class MS_API Context {
public:
// brief Constructor of MindSpore Lite context using default value for parameters
//
// return Instance of MindSpore Lite context.
/// \brief Constructor of MindSpore Lite context using default value for parameters.
///
/// \return Instance of MindSpore Lite Context.
Context();
// brief Constructor of MindSpore Lite context using input value for parameters
//
// param[in] threadNum Define the threadNum during the runtime.
// param[in] allocator Define the allocator for malloc.
// param[in] deviceCtx Define device information during the runtime.
/// \brief Constructor of MindSpore Lite Context using input value for parameters.
///
/// \param[in] threadNum Define the number of threads used at runtime.
/// \param[in] allocator Define the allocator for memory allocation.
/// \param[in] deviceCtx Define device information used at runtime.
Context(int threadNum, std::shared_ptr<Allocator> allocator, DeviceContext deviceCtx);
// brief Destructor of MindSpore Lite context
/// \brief Destructor of MindSpore Lite Context.
virtual ~Context();
public:
DeviceContext deviceCtx;
int threadNum = 2;
std::shared_ptr<Allocator> allocator;
CpuBindMode cpuBindMode = MID_CPU;
DeviceContext device_ctx_{DT_CPU};
int thread_num_ = 2; /**< thread number config for thread pool */
std::shared_ptr<Allocator> allocator = nullptr;
CpuBindMode cpu_bind_mode_ = MID_CPU;
};
} // namespace mindspore::lite
#endif // MINDSPORE_LITE_INCLUDE_CONTEXT_H_
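The renamed Context members above (thread_num_, device_ctx_, cpu_bind_mode_) are plain public fields, so configuration is direct assignment. A minimal sketch under the defaults shown in this diff; nothing beyond the diff is guaranteed:

```cpp
#include "include/context.h"

int main() {
  mindspore::lite::Context context;                    // defaults: DT_CPU, 2 threads, MID_CPU
  context.thread_num_ = 4;                             // thread pool size for the runtime
  context.cpu_bind_mode_ = mindspore::lite::MID_CPU;   // bind mid cpu first
  context.device_ctx_.type = mindspore::lite::DT_CPU;  // CPU device type
  // context.allocator is left as nullptr here; the runtime presumably falls
  // back to a default allocator when none is supplied.
  return 0;
}
```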
......@@ -26,25 +26,63 @@
namespace mindspore {
namespace session {
/// \brief LiteSession defined by MindSpore Lite.
class MS_API LiteSession {
public:
virtual ~LiteSession() = default;
/// \brief Static method to create a LiteSession pointer.
///
/// \param[in] context Define the context of session to be created.
///
/// \return Pointer of MindSpore Lite LiteSession.
static LiteSession *CreateSession(lite::Context *context);
virtual void BindThread(bool ifBind) = 0;
/// \brief Destructor of MindSpore Lite LiteSession.
virtual ~LiteSession() = default;
static LiteSession *CreateSession(lite::Context *context);
/// \brief Try to bind or unbind threads in the thread pool to the specified CPU cores.
///
/// \param[in] if_bind Define whether to bind or unbind threads.
virtual void BindThread(bool if_bind) = 0;
/// \brief Compile MindSpore Lite model.
///
/// \note CompileGraph should be called before RunGraph.
///
/// \param[in] model Define the model to be compiled.
///
/// \return ErrorCode of compile graph.
virtual int CompileGraph(lite::Model *model) = 0;
virtual std::vector<tensor::MSTensor *> GetInputs() = 0;
/// \brief Get input MindSpore Lite MSTensors of model.
///
/// \return A vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetInputs() const = 0;
virtual std::vector<tensor::MSTensor *> GetInputsByName(std::string name) = 0;
/// \brief Get input MindSpore Lite MSTensors of model by node name.
///
/// \param[in] node_name Define node name.
///
/// \return A vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetInputsByName(const std::string &node_name) const = 0;
/// \brief Run model compiled by this session.
///
/// \note RunGraph should be called after CompileGraph.
///
/// \return ErrorCode of run graph.
virtual int RunGraph() = 0;
virtual std::vector<tensor::MSTensor *> GetOutputs() = 0;
/// \brief Get output MindSpore Lite MSTensors of model.
///
/// \return A vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetOutputs() const = 0;
virtual std::vector<tensor::MSTensor *> GetOutputsByName(std::string name) = 0;
/// \brief Get output MindSpore Lite MSTensors of model by node name.
///
/// \param[in] node_name Define node name.
///
/// \return A vector of MindSpore Lite MSTensor.
virtual std::vector<tensor::MSTensor *> GetOutputsByName(const std::string &node_name) const = 0;
};
} // namespace session
} // namespace mindspore
......
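Taken together, the LiteSession documentation above fixes a call order: CreateSession, then CompileGraph, then RunGraph, with GetInputs/GetOutputs around the run. A sketch of that flow, assuming RET_OK is 0 and that the caller already holds a serialized model buffer; error handling is reduced to early returns:

```cpp
#include <cstring>
#include "include/context.h"
#include "include/lite_session.h"
#include "include/model.h"

int RunOnce(const char *model_buf, size_t size, const float *in_data, size_t in_bytes) {
  auto model = mindspore::lite::Model::Import(model_buf, size);
  mindspore::lite::Context context;
  context.thread_num_ = 2;
  auto *session = mindspore::session::LiteSession::CreateSession(&context);
  if (session == nullptr) return -1;
  if (session->CompileGraph(model.get()) != 0) return -1;  // CompileGraph before RunGraph
  auto inputs = session->GetInputs();
  if (!inputs.empty() && inputs.front()->Size() == in_bytes) {
    std::memcpy(inputs.front()->MutableData(), in_data, in_bytes);  // MutableData is writable
  }
  int ret = session->RunGraph();         // RunGraph after CompileGraph
  auto outputs = session->GetOutputs();  // output MSTensor pointers
  (void)outputs;
  delete session;
  return ret;
}
```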
......@@ -23,32 +23,88 @@
#include "schema/model_generated.h"
namespace mindspore {
/// \brief ModelImpl defined by MindSpore Lite.
///
/// \note Listed here as a public class for reference.
class ModelImpl;
namespace lite {
/// \brief Primitive defined by MindSpore Lite.
///
/// \note Listed here as a public class for reference.
class Primitive;
/// \brief Model defined by MindSpore Lite.
class Model {
public:
/// \brief Static method to create a Model pointer.
///
/// \param[in] model_buf Define the buffer read from a model file.
/// \param[in] size Define the byte size of the model buffer.
///
/// \return Pointer of MindSpore Lite Model.
static std::shared_ptr<Model> Import(const char *model_buf, size_t size);
virtual ~Model() = default;
/// \brief Constructor of MindSpore Lite Model using default value for parameters.
///
/// \return Instance of MindSpore Lite Model.
Model() = default;
/// \brief Destructor of MindSpore Lite Model.
virtual ~Model() = default;
/// \brief Get MindSpore Lite Primitive by name.
///
/// \param[in] name Define name of primitive to be returned.
///
/// \return A pointer of MindSpore Lite Primitive.
lite::Primitive *GetOp(const std::string &name) const;
/// \brief Get MindSpore Lite MetaGraph.
///
/// \return A pointer of MindSpore Lite MetaGraph.
const schema::MetaGraph *GetMetaGraph() const;
std::shared_ptr<ModelImpl> GetModelImpl();
/// \brief Get MindSpore Lite ModelImpl.
///
/// \return A pointer of MindSpore Lite ModelImpl.
std::shared_ptr<ModelImpl> model_impl();
/// \brief Free MetaGraph in MindSpore Lite Model.
void FreeMetaGraph();
protected:
std::shared_ptr<ModelImpl> modelImpl = nullptr;
std::shared_ptr<ModelImpl> model_impl_ = nullptr;
};
/// \brief ModelBuilder defined by MindSpore Lite.
class ModelBuilder {
public:
/// \brief OutEdge defined by MindSpore Lite.
struct OutEdge {
std::string nodeId;
size_t outEdgeIndex;
std::string nodeId; /**< Id of a node linked by this edge */
size_t outEdgeIndex; /**< Index of this edge */
};
/// \brief Constructor of MindSpore Lite ModelBuilder using default value for parameters.
///
/// \return Instance of MindSpore Lite ModelBuilder.
ModelBuilder() = default;
/// \brief Destructor of MindSpore Lite ModelBuilder.
virtual ~ModelBuilder() = default;
/// \brief Add primitive into model builder for model building.
///
/// \param[in] op Define the primitive to be added.
/// \param[in] inputs Define input edge of primitive to be added.
///
/// \return Id of the primitive added.
virtual std::string AddOp(const lite::Primitive &op, const std::vector<OutEdge> &inputs) = 0;
/// \brief Finish constructing the model.
///
/// \return A pointer of MindSpore Lite Model.
virtual Model *Construct();
};
} // namespace lite
......
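Model::Import takes a raw buffer rather than a path, so callers read the file themselves. A hypothetical loader sketched around the API above; the stream handling is an assumption, not part of the diff:

```cpp
#include <fstream>
#include <memory>
#include <string>
#include <vector>
#include "include/model.h"

std::shared_ptr<mindspore::lite::Model> LoadModel(const std::string &path) {
  std::ifstream ifs(path, std::ios::binary | std::ios::ate);
  if (!ifs.is_open()) return nullptr;
  auto size = static_cast<size_t>(ifs.tellg());
  std::vector<char> buf(size);
  ifs.seekg(0);
  ifs.read(buf.data(), static_cast<std::streamsize>(size));
  // Import deserializes the buffer into a Model; once the graph is compiled,
  // FreeMetaGraph() can release the MetaGraph to reclaim memory.
  return mindspore::lite::Model::Import(buf.data(), size);
}
```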
......@@ -25,46 +25,86 @@
namespace mindspore {
#define MS_API __attribute__((visibility("default")))
namespace tensor {
/// \brief MSTensor defined by MindSpore Lite.
class MS_API MSTensor {
public:
/// \brief Constructor of MindSpore Lite MSTensor.
///
/// \return Instance of MindSpore Lite MSTensor.
MSTensor() = default;
// brief Create a MSTensor pointer.
//
// param data_type DataTypeId of tensor to be created.
// param shape Shape of tensor to be created.
// return MSTensor pointer.
/// \brief Static method to create a MSTensor pointer.
///
/// \param[in] data_type Define data type of tensor to be created.
/// \param[in] shape Define shape of tensor to be created.
///
/// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in the TypeId enum are
/// suitable for MSTensor.
///
/// \return A pointer of MSTensor.
static MSTensor *CreateTensor(TypeId data_type, const std::vector<int> &shape);
/// \brief Destructor of MindSpore Lite MSTensor.
virtual ~MSTensor() = default;
/// \brief Get data type of the MindSpore Lite MSTensor.
///
/// \note TypeId is defined in mindspore/mindspore/core/ir/dtype/type_id.h. Only number types in the TypeId enum are
/// suitable for MSTensor.
///
/// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor.
virtual TypeId data_type() const = 0;
virtual TypeId set_data_type(const TypeId data_type) = 0;
/// \brief Set data type for the MindSpore Lite MSTensor.
///
/// \param[in] data_type Define MindSpore Lite TypeId to be set into the MindSpore Lite MSTensor.
///
/// \return MindSpore Lite TypeId of the MindSpore Lite MSTensor after it is set.
virtual TypeId set_data_type(TypeId data_type) = 0;
/// \brief Get shape of the MindSpore Lite MSTensor.
///
/// \return A vector of int as the shape of the MindSpore Lite MSTensor.
virtual std::vector<int> shape() const = 0;
/// \brief Set shape for the MindSpore Lite MSTensor.
///
/// \param[in] shape Define a vector of int as the shape to be set into the MindSpore Lite MSTensor.
///
/// \return Size of the shape of the MindSpore Lite MSTensor after it is set.
virtual size_t set_shape(const std::vector<int> &shape) = 0;
/// \brief Get size of the dimension of the MindSpore Lite MSTensor indexed by the parameter index.
///
/// \param[in] index Define index of dimension returned.
///
/// \return Size of dimension of the MindSpore Lite MSTensor.
virtual int DimensionSize(size_t index) const = 0;
// brief Get number of element in MSTensor.
//
// return Number of element in MSTensor.
/// \brief Get number of elements in MSTensor.
///
/// \return Number of elements in MSTensor.
virtual int ElementsNum() const = 0;
/// \brief Get hash of the MindSpore Lite MSTensor.
///
/// \return Hash of the MindSpore Lite MSTensor.
virtual std::size_t hash() const = 0;
// brief Get byte size of data in MSTensor.
//
// return Byte size of data in MSTensor.
/// \brief Get byte size of data in MSTensor.
///
/// \return Byte size of data in MSTensor.
virtual size_t Size() const = 0;
// brief Get pointer of data in MSTensor.
//
// The data pointer can be used to both write or read data in MSTensor.
//
// return A pointer points to data in MSTensor.
/// \brief Get the pointer of data in MSTensor.
///
/// \note The data pointer can be used to both write and read data in MSTensor.
///
/// \return A pointer to the data in MSTensor.
virtual void *MutableData() const = 0;
};
using MultiTensor = std::vector<std::vector<std::shared_ptr<tensor::MSTensor>>>;
} // namespace tensor
} // namespace mindspore
#endif // MINDSPORE_INCLUDE_MS_TENSOR_H_
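The MSTensor notes above point at type_id.h for the TypeId values; kNumberTypeFloat32 is used here on that basis. A small sketch of CreateTensor plus the accessors, assuming the caller owns the tensor it creates:

```cpp
#include <vector>
#include "include/ms_tensor.h"

void FillWithOnes() {
  std::vector<int> shape = {1, 2, 2};
  // kNumberTypeFloat32 is a number type from ir/dtype/type_id.h, as the \note requires.
  auto *tensor = mindspore::tensor::MSTensor::CreateTensor(mindspore::kNumberTypeFloat32, shape);
  if (tensor == nullptr) return;
  auto *data = static_cast<float *>(tensor->MutableData());  // usable for both read and write
  for (int i = 0; i < tensor->ElementsNum(); ++i) {
    data[i] = 1.0f;
  }
  // Size() reports bytes: ElementsNum() (4 here) times sizeof(float).
  delete tensor;  // assumption: ownership rests with the creator
}
```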
......@@ -24,8 +24,8 @@ Context::~Context() = default;
Context::Context(int threadNum, std::shared_ptr<Allocator> allocator, DeviceContext deviceCtx) {
this->allocator = std::move(allocator);
this->threadNum = threadNum;
this->deviceCtx = std::move(deviceCtx);
this->thread_num_ = threadNum;
this->device_ctx_ = std::move(deviceCtx);
}
} // namespace mindspore::lite
......@@ -197,7 +197,7 @@ class LiteTensor : public mindspore::tensor::MSTensor {
TypeId data_type() const override;
TypeId set_data_type(const TypeId data_type) override;
TypeId set_data_type(TypeId data_type) override;
std::vector<int> shape() const override;
......
......@@ -152,7 +152,7 @@ int LiteSession::CompileGraph(Model *model) {
return RET_OK;
}
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputs() {
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputs() const {
std::vector<mindspore::tensor::MSTensor *> ret;
for (auto &iter : this->input_map) {
auto &node_input_tensors = iter.second;
......@@ -167,7 +167,7 @@ std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputs() {
int LiteSession::RunGraph() {
MS_EXCEPTION_IF_NULL(this->context_);
SetMaxWokerNum(context_->threadNum);
SetMaxWokerNum(context_->thread_num_);
Executor executor;
return executor.Run(this->inputs, this->outputs, this->kernels, this->context_->allocator.get());
}
......@@ -178,7 +178,7 @@ int LiteSession::RunGraph(const kernel::KernelCallBack &before, const kernel::Ke
return executor.Run(this->inputs, this->outputs, this->kernels, this->context_->allocator.get(), before, after);
}
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputs() {
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputs() const {
std::vector<mindspore::tensor::MSTensor *> ret;
for (auto &iter : this->output_map) {
auto &node_output_tensors = iter.second;
......@@ -193,20 +193,20 @@ std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputs() {
int LiteSession::Init(Context *context) {
MS_EXCEPTION_IF_NULL(context);
this->context_ = new (std::nothrow) Context(context->threadNum, context->allocator, context->deviceCtx);
this->context_ = new (std::nothrow) Context(context->thread_num_, context->allocator, context->device_ctx_);
if (this->context_ == nullptr) {
MS_LOG(ERROR) << "new context failed";
return RET_MEMORY_FAILED;
}
this->context_->cpuBindMode = context->cpuBindMode;
ConfigThreadPool(context->cpuBindMode, context->threadNum);
this->context_->cpu_bind_mode_ = context->cpu_bind_mode_;
ConfigThreadPool(context->cpu_bind_mode_, context->thread_num_);
auto ret = KernelRegistry::GetInstance()->Init();
if (ret != RET_OK) {
MS_LOG(ERROR) << "KernelRegistry Init Failed.";
return ret;
}
#if SUPPORT_GPU
if (context_->deviceCtx.type == DT_GPU) {
if (context_->device_ctx_.type == DT_GPU) {
auto opencl_runtime = lite::opencl::OpenCLRuntime::GetInstance();
opencl_runtime->Init();
}
......@@ -215,8 +215,8 @@ int LiteSession::Init(Context *context) {
}
void LiteSession::BindThread(bool ifBind) {
if (this->context_->cpuBindMode != NO_BIND) {
DoAllThreadBind(ifBind, static_cast<int>(this->context_->cpuBindMode));
if (this->context_->cpu_bind_mode_ != NO_BIND) {
DoAllThreadBind(ifBind, static_cast<int>(this->context_->cpu_bind_mode_));
}
}
......@@ -237,8 +237,25 @@ LiteSession::~LiteSession() {
}
}
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(std::string name) { return input_map[name]; }
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputsByName(std::string name) { return output_map[name]; }
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetInputsByName(const std::string &name) const {
auto ret = input_map.find(name);
if (ret == input_map.end()) {
MS_LOG(WARNING) << "Node " << name << " is not an input node";
std::vector<mindspore::tensor::MSTensor *> empty_ret;
return empty_ret;
}
return ret->second;
}
std::vector<mindspore::tensor::MSTensor *> LiteSession::GetOutputsByName(const std::string &name) const {
auto ret = output_map.find(name);
if (ret == output_map.end()) {
MS_LOG(WARNING) << "Node " << name << " is not an output node";
std::vector<mindspore::tensor::MSTensor *> empty_ret;
return empty_ret;
}
return ret->second;
}
} // namespace lite
session::LiteSession *session::LiteSession::CreateSession(lite::Context *context) {
......
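The rewritten lookups above return an empty vector and log a WARNING for an unknown name, instead of the old operator[] access, which silently inserted a default entry into the map. A caller-side sketch of the new contract; the node name is hypothetical:

```cpp
#include <vector>
#include "include/lite_session.h"

void ReadOutput(mindspore::session::LiteSession *session) {
  // "conv1" is a hypothetical node name, used only to illustrate the lookup.
  auto tensors = session->GetOutputsByName("conv1");
  if (tensors.empty()) {
    // Unknown names now come back empty (with a WARNING in the log) rather
    // than growing output_map as the old operator[] access did.
    return;
  }
  auto *data = static_cast<float *>(tensors.front()->MutableData());
  (void)data;
}
```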
......@@ -42,17 +42,17 @@ class LiteSession : public session::LiteSession {
int CompileGraph(Model *model) override;
std::vector<mindspore::tensor::MSTensor *> GetInputs() override;
std::vector<mindspore::tensor::MSTensor *> GetInputs() const override;
std::vector<mindspore::tensor::MSTensor *> GetInputsByName(std::string name) override;
std::vector<mindspore::tensor::MSTensor *> GetInputsByName(const std::string &name) const override;
int RunGraph() override;
int RunGraph(const kernel::KernelCallBack &before = nullptr, const kernel::KernelCallBack &after = nullptr);
std::vector<mindspore::tensor::MSTensor *> GetOutputs() override;
std::vector<mindspore::tensor::MSTensor *> GetOutputs() const override;
std::vector<mindspore::tensor::MSTensor *> GetOutputsByName(std::string name) override;
std::vector<mindspore::tensor::MSTensor *> GetOutputsByName(const std::string &name) const override;
protected:
int ConvertTensors(const lite::Model *model);
......@@ -75,4 +75,3 @@ class LiteSession : public session::LiteSession {
} // namespace lite
} // namespace mindspore
#endif // MINDSPORE_LITE_SRC_LITE_SESSION_H_
......@@ -26,28 +26,28 @@ namespace mindspore::lite {
std::shared_ptr<Model> Model::Import(const char *model_buf, size_t size) {
auto model = std::make_shared<Model>();
model->modelImpl = ModelImpl::Import(model_buf, size);
model->model_impl_ = ModelImpl::Import(model_buf, size);
return model;
}
lite::Primitive *Model::GetOp(const std::string &name) const {
MS_EXCEPTION_IF_NULL(modelImpl);
return const_cast<Primitive *>(modelImpl->GetOp(name));
MS_EXCEPTION_IF_NULL(model_impl_);
return const_cast<Primitive *>(model_impl_->GetOp(name));
}
void Model::FreeMetaGraph() {
MS_EXCEPTION_IF_NULL(modelImpl);
return modelImpl->FreeMetaGraph();
MS_EXCEPTION_IF_NULL(model_impl_);
return model_impl_->FreeMetaGraph();
}
const schema::MetaGraph *Model::GetMetaGraph() const {
MS_EXCEPTION_IF_NULL(modelImpl);
return modelImpl->GetMetaGraph();
MS_EXCEPTION_IF_NULL(model_impl_);
return model_impl_->GetMetaGraph();
}
std::shared_ptr<ModelImpl> Model::GetModelImpl() {
MS_EXCEPTION_IF_NULL(modelImpl);
return this->modelImpl;
std::shared_ptr<ModelImpl> Model::model_impl() {
MS_EXCEPTION_IF_NULL(model_impl_);
return this->model_impl_;
}
} // namespace mindspore::lite
......@@ -29,8 +29,8 @@ class ConcatBaseCPUKernel : public LiteKernel {
public:
ConcatBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
opParameter->thread_num_ = ctx->threadNum;
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
opParameter->thread_num_ = ctx->thread_num_;
concat_param_ = reinterpret_cast<ConcatParameter *>(opParameter);
}
......
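The same mechanical rename repeats through every base kernel below: the constructor caches ctx->thread_num_ (formerly ctx->threadNum) and mirrors it into the op parameter. A condensed, self-contained sketch of the pattern with simplified stand-in types:

```cpp
struct OpParameter { int thread_num_ = 1; };
struct Context { int thread_num_ = 2; };

class ExampleBaseCPUKernel {
 public:
  ExampleBaseCPUKernel(OpParameter *parameter, const Context *ctx)
      : op_parameter_(parameter), ctx_(ctx), thread_count_(ctx->thread_num_) {
    // Mirror the context's thread count into the parameter so the parallel
    // launch helpers (e.g. LiteBackendParallelLaunch) see the same value.
    op_parameter_->thread_num_ = ctx->thread_num_;
  }

 private:
  OpParameter *op_parameter_;
  const Context *ctx_;
  int thread_count_;
};
```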
......@@ -87,7 +87,7 @@ int ConvolutionBaseCPUKernel::Init() {
conv_param_->output_h_ = output->Height();
conv_param_->output_w_ = output->Width();
conv_param_->output_channel_ = output->Channel();
conv_param_->thread_num_ = ctx_->threadNum;
conv_param_->thread_num_ = ctx_->thread_num_;
return RET_OK;
}
......
......@@ -38,8 +38,8 @@ class ConvolutionBaseCPUKernel : public LiteKernel {
public:
ConvolutionBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
opParameter->thread_num_ = ctx->threadNum;
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
opParameter->thread_num_ = ctx->thread_num_;
conv_param_ = reinterpret_cast<ConvParameter *>(opParameter);
}
~ConvolutionBaseCPUKernel() override;
......
......@@ -28,8 +28,8 @@ class CropBaseCPUKernel : public LiteKernel {
public:
CropBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
opParameter->thread_num_ = ctx->threadNum;
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
opParameter->thread_num_ = ctx->thread_num_;
}
~CropBaseCPUKernel() = default;
......
......@@ -29,7 +29,7 @@ class FullconnectionBaseCPUKernel : public LiteKernel {
public:
FullconnectionBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
fc_param_ = reinterpret_cast<MatMulParameter *>(opParameter);
}
~FullconnectionBaseCPUKernel() = default;
......
......@@ -30,7 +30,7 @@ class PoolingBaseCPUKernel : public LiteKernel {
public:
PoolingBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
pooling_param_ = reinterpret_cast<PoolingParameter *>(opParameter);
}
~PoolingBaseCPUKernel() = default;
......
......@@ -29,7 +29,7 @@ class PriorBoxCPUKernel : public LiteKernel {
public:
PriorBoxCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
prior_box_param_ = reinterpret_cast<PriorBoxParameter *>(opParameter);
}
~PriorBoxCPUKernel() = default;
......
......@@ -25,7 +25,7 @@ class QuantDTypeCastCPUKernel : public LiteKernel {
public:
QuantDTypeCastCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
~QuantDTypeCastCPUKernel() = default;
int Init() override;
......
......@@ -28,7 +28,7 @@ class ReshapeBaseCPUKernel : public LiteKernel {
public:
ReshapeBaseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
reshape_param_ = reinterpret_cast<ReshapeParameter *>(opParameter);
}
~ReshapeBaseCPUKernel() = default;
......
......@@ -26,7 +26,7 @@ class ActivationCPUKernel : public LiteKernel {
public:
ActivationCPUKernel(OpParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(param, inputs, outputs), thread_count_(ctx->threadNum) {
: LiteKernel(param, inputs, outputs), thread_count_(ctx->thread_num_) {
type_ = (reinterpret_cast<ActivationParameter *>(param))->type_;
alpha_ = (reinterpret_cast<ActivationParameter *>(param))->alpha_;
}
......
......@@ -101,7 +101,7 @@ kernel::LiteKernel *CpuAddNFp32KernelCreator(const std::vector<lite::tensor::Ten
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_AddN);
op_parameter->thread_num_ = ctx->threadNum;
op_parameter->thread_num_ = ctx->thread_num_;
auto *kernel = new (std::nothrow) AddNCPUKernel(op_parameter, inputs, outputs);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new AddNCPUKernel fail!";
......
......@@ -49,7 +49,7 @@ class ArithmeticCPUKernel : public LiteKernel {
public:
ArithmeticCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->thread_num_) {
switch (parameter->type_) {
case PrimitiveType_Mul:
arithmetic_run_ = ElementMul;
......
......@@ -45,7 +45,7 @@ class ArithmeticSelfCPUKernel : public LiteKernel {
public:
explicit ArithmeticSelfCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
switch (parameter->type_) {
case PrimitiveType_Abs:
arithmeticSelf_run_ = ElementAbs;
......
......@@ -95,7 +95,7 @@ kernel::LiteKernel *CpuCastFp32KernelCreator(const std::vector<lite::tensor::Ten
MS_LOG(ERROR) << "Input context is nullptr!";
return nullptr;
}
if (ctx->threadNum == 0) {
if (ctx->thread_num_ == 0) {
MS_LOG(ERROR) << "context thread num is 0!";
return nullptr;
}
......
......@@ -26,7 +26,7 @@ class CastCPUKernel : public LiteKernel {
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs) {
if (ctx != nullptr) {
thread_num_ = ctx->threadNum;
thread_num_ = ctx->thread_num_;
}
}
......
......@@ -31,7 +31,7 @@ class ExpandDimsCPUKernel : public LiteKernel {
public:
ExpandDimsCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {}
~ExpandDimsCPUKernel() override = default;
int Init() override;
......
......@@ -29,7 +29,7 @@ class FillCPUKernel : public LiteKernel {
public:
FillCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {}
~FillCPUKernel() override = default;
int Init() override;
......
......@@ -26,7 +26,7 @@ class GatherCPUKernel : public LiteKernel {
public:
GatherCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->thread_num_) {}
~GatherCPUKernel() override = default;
int Init() override;
......
......@@ -31,7 +31,7 @@ class GatherNdCPUKernel : public LiteKernel {
public:
GatherNdCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {}
~GatherNdCPUKernel() override;
int Init() override;
......
......@@ -26,7 +26,7 @@ class LocalResponseNormCPUKernel : public LiteKernel {
public:
LocalResponseNormCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->thread_num_) {}
~LocalResponseNormCPUKernel() override = default;
int Init() override;
......
......@@ -58,7 +58,7 @@ int OneHotCPUKernel::Init() {
MS_LOG(ERROR) << "OneHot context nullptr";
return RET_NULL_PTR;
}
thread_num_ = context_->threadNum;
thread_num_ = context_->thread_num_;
const int indices_rank = static_cast<int>(inputs_.at(0)->shape().size());
if (axis_ < 0) {
......@@ -148,7 +148,7 @@ int OneHotCPUKernel::GetParams() {
}
int OneHotCPUKernel::Run() {
int error_code = LiteBackendParallelLaunch(RunOneHot, this, context_->threadNum);
int error_code = LiteBackendParallelLaunch(RunOneHot, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "OneHot function error error_code[" << error_code << "]";
return RET_ERROR;
......
......@@ -79,7 +79,7 @@ int PadCPUKernel::RunImpl(int task_id) {
auto input_data = reinterpret_cast<float *>(input->Data());
auto output_data = reinterpret_cast<float *>(output->Data());
Pad(input_data, output_data, in_, out_, pad_param_->paddings_, task_id, context_->threadNum);
Pad(input_data, output_data, in_, out_, pad_param_->paddings_, task_id, context_->thread_num_);
return RET_OK;
}
......@@ -92,7 +92,7 @@ int PadCPUKernel::Run() {
// todo parallel memset to save time
memset(output_data, 0, output_size * sizeof(float));
int error_code = LiteBackendParallelLaunch(PadImpl, this, context_->threadNum);
int error_code = LiteBackendParallelLaunch(PadImpl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Pad run error, error_code[" << error_code << "]";
return RET_ERROR;
......
......@@ -28,7 +28,7 @@ class PowerCPUKernel : public LiteKernel {
PowerCPUKernel(PowerParameter *param, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(reinterpret_cast<OpParameter *>(param), inputs, outputs),
thread_count_(ctx->threadNum),
thread_count_(ctx->thread_num_),
power_(param->power_),
scale_(param->scale_),
shift_(param->shift_) {}
......
......@@ -30,7 +30,7 @@ class PReluCPUKernel : public LiteKernel {
public:
PReluCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
prelu_param_ = (reinterpret_cast<PReluParameter *>(opParameter));
}
~PReluCPUKernel() = default;
......
......@@ -138,7 +138,7 @@ int ReduceCPUKernel::Init() {
int ReduceCPUKernel::CallReduceUnit(int task_id) {
auto ret = reducer_(outer_size_, inner_size_, axis_size_, src_data_, tmp_shape_.data(), dst_data_, task_id,
context_->threadNum);
context_->thread_num_);
return ret;
}
......@@ -167,7 +167,7 @@ int ReduceCPUKernel::Run() {
inner_size_ *= tmp_shape_[k];
}
axis_size_ = tmp_shape_[axis];
auto error_code = LiteBackendParallelLaunch(ReduceImpl, this, context_->threadNum);
auto error_code = LiteBackendParallelLaunch(ReduceImpl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]";
return RET_ERROR;
......@@ -187,7 +187,7 @@ int ReduceCPUKernel::Run() {
}
axis_size_ = tmp_shape_[last_reduce_axis];
dst_data_ = reinterpret_cast<float *>(outputs_.at(0)->Data());
auto error_code = LiteBackendParallelLaunch(ReduceImpl, this, context_->threadNum);
auto error_code = LiteBackendParallelLaunch(ReduceImpl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Reduce run error, error_code[" << error_code << "]";
return RET_ERROR;
......
......@@ -172,10 +172,10 @@ int ResizeCPUKernel::RunImpl(int task_id) {
layout_convertor_(input_data, exec_input_data_, input->Batch(), input->Height() * input->Width(),
input->Channel());
ret = ResizeBilinear(exec_input_data_, output_data, inputs_[0]->shape().data(), outputs_[0]->shape().data(),
align_corners_, task_id, context_->threadNum);
align_corners_, task_id, context_->thread_num_);
} else {
ret = ResizeBilinear(input_data, output_data, inputs_[0]->shape().data(), outputs_[0]->shape().data(),
align_corners_, task_id, context_->threadNum);
align_corners_, task_id, context_->thread_num_);
}
break;
}
......@@ -188,10 +188,10 @@ int ResizeCPUKernel::RunImpl(int task_id) {
layout_convertor_(input_data, exec_input_data_, input->Batch(), input->Height() * input->Width(),
input->Channel());
ret = ResizeNearestNeighbor(exec_input_data_, output_data, input_shape.data(), outputs_[0]->shape().data(),
task_id, context_->threadNum);
task_id, context_->thread_num_);
} else {
ret = ResizeNearestNeighbor(input_data, output_data, input_shape.data(), outputs_[0]->shape().data(), task_id,
context_->threadNum);
context_->thread_num_);
}
break;
}
......@@ -205,7 +205,7 @@ int ResizeCPUKernel::RunImpl(int task_id) {
}
int ResizeCPUKernel::Run() {
int error_code = LiteBackendParallelLaunch(ResizeImpl, this, context_->threadNum);
int error_code = LiteBackendParallelLaunch(ResizeImpl, this, context_->thread_num_);
if (error_code != RET_OK) {
MS_LOG(ERROR) << "Resize run error, error_code[" << error_code << "]";
return RET_ERROR;
......
......@@ -30,7 +30,7 @@ class ReverseCPUKernel : public LiteKernel {
public:
ReverseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {}
~ReverseCPUKernel() {
if (tmp_ != nullptr) {
free(tmp_);
......
......@@ -26,7 +26,7 @@ class ScaleCPUKernel : public LiteKernel {
public:
explicit ScaleCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
~ScaleCPUKernel() override = default;
int Init() override;
......
......@@ -27,7 +27,7 @@ class ScatterNDCPUKernel : public LiteKernel {
public:
explicit ScatterNDCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
~ScatterNDCPUKernel() override = default;
int Init() override;
......
......@@ -107,7 +107,7 @@ kernel::LiteKernel *CpuSliceFp32KernelCreator(const std::vector<lite::tensor::Te
return nullptr;
}
MS_ASSERT(desc.type == schema::PrimitiveType_Slice);
op_parameter->thread_num_ = ctx->threadNum;
op_parameter->thread_num_ = ctx->thread_num_;
auto *kernel = new (std::nothrow) SliceCPUKernel(op_parameter, inputs, outputs);
if (kernel == nullptr) {
MS_LOG(ERROR) << "new SliceCPUKernel fail!";
......
......@@ -25,7 +25,7 @@ class SpaceToDepthCPUKernel : public LiteKernel {
public:
SpaceToDepthCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
~SpaceToDepthCPUKernel() = default;
int SpaceToDepth(int task_id);
......
......@@ -30,7 +30,7 @@ class SparseToDenseCPUKernel : public LiteKernel {
public:
SparseToDenseCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
s2d_param_ = (reinterpret_cast<SparseToDenseParameter *>(opParameter));
}
~SparseToDenseCPUKernel() = default;
......
......@@ -26,7 +26,7 @@ class SplitCPUKernel : public LiteKernel {
public:
SplitCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
~SplitCPUKernel() override = default;
int Init() override;
......
......@@ -26,7 +26,7 @@ class StridedSliceCPUKernel : public LiteKernel {
public:
StridedSliceCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_num_(ctx->thread_num_) {}
~StridedSliceCPUKernel() override = default;
int Init() override;
......
......@@ -28,7 +28,7 @@ class UnsqueezeCPUKernel : public LiteKernel {
public:
UnsqueezeCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {}
~UnsqueezeCPUKernel() = default;
int Init() override;
......
......@@ -30,7 +30,7 @@ class WhereCPUKernel : public LiteKernel {
public:
WhereCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
where_param_ = reinterpret_cast<WhereParameter *>(opParameter);
}
~WhereCPUKernel() = default;
......
......@@ -26,7 +26,7 @@ class QuantizedAddCPUKernel : public LiteKernel {
public:
explicit QuantizedAddCPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx_->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx_->thread_num_) {}
~QuantizedAddCPUKernel() override {}
int Init() override;
......
......@@ -37,7 +37,7 @@ class ArithmeticSelfInt8CPUKernel : public LiteKernel {
public:
explicit ArithmeticSelfInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx->thread_num_) {
switch (parameter->type_) {
case PrimitiveType_Round:
arithmeticSelf_run_ = ElementRound;
......
......@@ -26,7 +26,7 @@ class HswishInt8CPUKernel : public LiteKernel {
public:
HswishInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->threadNum) {}
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->thread_num_) {}
~HswishInt8CPUKernel() override = default;
int Init() override;
......
......@@ -26,7 +26,7 @@ class MulInt8CPUKernel : public LiteKernel {
public:
explicit MulInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx_->threadNum) {}
: LiteKernel(parameter, inputs, outputs), ctx_(ctx), thread_count_(ctx_->thread_num_) {}
~MulInt8CPUKernel() override {};
int Init() override;
......
......@@ -29,7 +29,7 @@ class PadInt8CPUKernel : public LiteKernel {
explicit PadInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs) {
opParameter->thread_num_ = ctx->threadNum;
opParameter->thread_num_ = ctx->thread_num_;
pad_param_ = reinterpret_cast<PadParameter *>(opParameter);
}
~PadInt8CPUKernel() override { FreeQuantParam(); };
......
......@@ -27,7 +27,7 @@ class ReluInt8CPUKernel : public LiteKernel {
public:
ReluInt8CPUKernel(OpParameter *parameter, const std::vector<lite::tensor::Tensor *> &inputs,
const std::vector<lite::tensor::Tensor *> &outputs, const lite::Context *ctx)
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->threadNum) {
: LiteKernel(parameter, inputs, outputs), thread_count_(ctx->thread_num_) {
type_ = (reinterpret_cast<ActivationParameter *>(parameter))->type_;
}
~ReluInt8CPUKernel() override = default;
......
......@@ -159,7 +159,7 @@ kernel::LiteKernel *Scheduler::ScheduleNode(const std::vector<tensor::Tensor *>
MS_ASSERT(nullptr != primitive);
auto data_type = inputs.front()->data_type();
kernel::KernelKey desc{kernel::KERNEL_ARCH::kCPU, data_type, primitive->Type()};
if (context_->deviceCtx.type == DT_GPU) {
if (context_->device_ctx_.type == DT_GPU) {
desc.arch = kernel::KERNEL_ARCH::kGPU;
auto *kernel = KernelFactory::GetInstance()->GetKernel(inputs, outputs, primitive, context_, desc);
if (nullptr != kernel) {
......
......@@ -47,8 +47,8 @@ TEST_F(ConverterTest, TestOCR_02) {
}
TEST_F(ConverterTest, TestHebing) {
const char *argv[] = {"./converter", "--fmk=CAFFE", "--modelFile=./hiai/model_hebing_3branch.caffemodel",
"--weightFile=./models/model_hebing_3branch.prototxt",
const char *argv[] = {"./converter", "--fmk=CAFFE", "--modelFile=./hiai/model_hebing_3branch.prototxt",
"--weightFile=./models/model_hebing_3branch.caffemodel",
"--outputFile=./models/model_hebing_3branch"};
auto status = RunConverter(5, argv);
ASSERT_EQ(status, RET_OK);
......
......@@ -104,9 +104,9 @@ TEST_F(InferTest, TestConvNode) {
meta_graph.reset();
content = nullptr;
auto context = new lite::Context;
context->cpuBindMode = lite::NO_BIND;
context->deviceCtx.type = lite::DT_CPU;
context->threadNum = 4;
context->cpu_bind_mode_ = lite::NO_BIND;
context->device_ctx_.type = lite::DT_CPU;
context->thread_num_ = 4;
auto session = session::LiteSession::CreateSession(context);
ASSERT_NE(nullptr, session);
auto ret = session->CompileGraph(model.get());
......@@ -201,9 +201,9 @@ TEST_F(InferTest, TestAddNode) {
meta_graph.reset();
content = nullptr;
auto context = new lite::Context;
context->cpuBindMode = lite::NO_BIND;
context->deviceCtx.type = lite::DT_GPU;
context->threadNum = 4;
context->cpu_bind_mode_ = lite::NO_BIND;
context->device_ctx_.type = lite::DT_GPU;
context->thread_num_ = 4;
auto session = session::LiteSession::CreateSession(context);
ASSERT_NE(nullptr, session);
auto ret = session->CompileGraph(model.get());
......@@ -252,9 +252,9 @@ TEST_F(InferTest, TestModel) {
ASSERT_NE(nullptr, model);
delete[] buf[0];
auto context = new lite::Context;
context->cpuBindMode = lite::NO_BIND;
context->deviceCtx.type = lite::DT_CPU;
context->threadNum = 4;
context->cpu_bind_mode_ = lite::NO_BIND;
context->device_ctx_.type = lite::DT_CPU;
context->thread_num_ = 4;
auto session = session::LiteSession::CreateSession(context);
ASSERT_NE(nullptr, session);
auto ret = session->CompileGraph(model.get());
......
......@@ -112,7 +112,7 @@ TEST_F(TestActivationFp32, HSwishFp32) {
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
lite::Context ctx;
ctx.threadNum = 7;
ctx.thread_num_ = 7;
kernel::LiteKernel *kernel =
creator(inputs_tensor, outputs_tensor, reinterpret_cast<OpParameter *>(&op_param), &ctx, desc);
ASSERT_NE(kernel, nullptr);
......
......@@ -292,7 +292,7 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test1) {
std::vector<lite::tensor::Tensor *> outputs_;
auto conv_param = new ConvParameter();
lite::Context *ctx = new lite::Context();
ctx->threadNum = 1;
ctx->thread_num_ = 1;
float *correct;
int total_size = Conv1x1TestInit1(&inputs_, &outputs_, conv_param, &correct);
kernel::Convolution1x1CPUKernel *conv1x1 =
......@@ -360,7 +360,7 @@ TEST_F(TestConv1x1Fp32, Conv1x1Test2) {
std::vector<lite::tensor::Tensor *> outputs_;
auto conv_param = new ConvParameter();
lite::Context *ctx = new lite::Context();
ctx->threadNum = 2;
ctx->thread_num_ = 2;
float *correct;
int total_size = Conv1x1TestInit2(&inputs_, &outputs_, conv_param, &correct);
kernel::Convolution1x1CPUKernel *conv1x1 =
......
......@@ -293,7 +293,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest1) {
std::vector<lite::tensor::Tensor *> outputs_;
ConvParameter *deconv_param = new ConvParameter();
lite::Context *ctx = new lite::Context();
ctx->threadNum = 1;
ctx->thread_num_ = 1;
float *correct;
int total_size = DeConvTestInit1(&inputs_, &outputs_, deconv_param, &correct);
kernel::DeConvolutionCPUKernel *deconv =
......@@ -364,7 +364,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest2) {
float *correct;
int total_size = DeConvTestInit2(&inputs_, &outputs_, deconv_param, &correct);
lite::Context *ctx = new lite::Context;
ctx->threadNum = 4;
ctx->thread_num_ = 4;
kernel::DeConvolutionCPUKernel *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx);
......@@ -443,7 +443,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest3) {
float *correct;
int total_size = DeConvTestInit3(&inputs_, &outputs_, deconv_param, &correct);
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::DeConvolutionCPUKernel *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx);
......@@ -515,7 +515,7 @@ TEST_F(TestDeConvolutionFp32, DeConvTest4) {
float *correct;
int total_size = DeConvTestInit4(&inputs_, &outputs_, deconv_param, &correct);
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::DeConvolutionCPUKernel *deconv =
new kernel::DeConvolutionCPUKernel(reinterpret_cast<OpParameter *>(deconv_param), inputs_, outputs_, ctx);
......
......@@ -77,7 +77,7 @@ TEST_F(TestFcFp32, FcTest1) {
float *correct;
int total_size = FcTestInit1(&inputs_, &outputs_, matmul_param, &correct);
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::FullconnectionCPUKernel *fc =
new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
......@@ -134,7 +134,7 @@ TEST_F(TestFcFp32, FcTest2) {
float *correct;
int total_size = FcTestInit2(&inputs_, &outputs_, matmul_param, &correct);
lite::Context *ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
kernel::FullconnectionCPUKernel *fc =
new kernel::FullconnectionCPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
......
......@@ -75,7 +75,7 @@ TEST_F(SpaceToDepthTestFp32, SpaceToDepthTest2) {
op_param.block_size_ = 2;
lite::Context ctx;
ctx.threadNum = 3;
ctx.thread_num_ = 3;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeFloat32, schema::PrimitiveType_SpaceToDepth};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......
......@@ -65,7 +65,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant0_thread2) {
ArithmeticSelfParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Floor;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......@@ -124,7 +124,7 @@ TEST_F(TestArithmeticSelfInt8, floor_quant1_thread2) {
ArithmeticSelfParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Floor;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......@@ -183,7 +183,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant0_thread2) {
ArithmeticSelfParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Round;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......@@ -242,7 +242,7 @@ TEST_F(TestArithmeticSelfInt8, round_quant1_thread2) {
ArithmeticSelfParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Round;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......@@ -301,7 +301,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant0_thread2) {
ArithmeticSelfParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Ceil;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......@@ -360,7 +360,7 @@ TEST_F(TestArithmeticSelfInt8, ceil_quant1_thread2) {
ArithmeticSelfParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Ceil;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_Floor};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......
......@@ -66,7 +66,7 @@ TEST_F(TestCropInt8, crop_1d_axis0_offset0_quant0_thread2) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
op_param.axis_ = 0;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -129,7 +129,7 @@ TEST_F(TestCropInt8, crop_2d_axis1_offset0_quant0_thread2) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
op_param.axis_ = 1;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -192,7 +192,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread0) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
op_param.axis_ = 1;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -256,7 +256,7 @@ TEST_F(TestCropInt8, crop_3d_axis1_offset0_quant0_thread2) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
op_param.axis_ = 1;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -319,7 +319,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread0) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
op_param.axis_ = 0;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -382,7 +382,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset0_quant0_thread0) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
op_param.axis_ = 1;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -445,7 +445,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant0_thread0) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
op_param.axis_ = 0;
op_param.offset_[0] = 1;
op_param.offset_[1] = 1;
......@@ -511,7 +511,7 @@ TEST_F(TestCropInt8, crop_4d_axis1_offset1_quant1_thread0) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 1;
ctx->thread_num_ = 1;
op_param.axis_ = 0;
op_param.offset_[0] = 1;
op_param.offset_[1] = 1;
......@@ -579,7 +579,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread2) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
op_param.axis_ = 0;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......@@ -644,7 +644,7 @@ TEST_F(TestCropInt8, crop_4d_axis0_offset0_quant0_thread3) {
CropParameter op_param;
op_param.op_parameter_.type_ = schema::PrimitiveType_Crop;
lite::Context *ctx = new lite::Context;
ctx->threadNum = 3;
ctx->thread_num_ = 3;
op_param.axis_ = 0;
op_param.offset_[0] = 1;
op_param.offset_size_ = 1;
......
......@@ -247,7 +247,7 @@ TEST_F(TestDeconvInt8, DeConvInt8Test1) {
std::vector<lite::tensor::Tensor *> outputs_;
auto deconv_param = new ConvParameter();
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
int8_t *correct;
int total_size = DeConvInt8TestInit1(&inputs_, &outputs_, deconv_param, &correct);
mindspore::kernel::DeConvInt8CPUKernel *deconv =
......
......@@ -123,7 +123,7 @@ TEST_F(TestFcInt8, fcint8) {
int output_zp;
int total_size = FcInt8TestInit(&inputs_, &outputs_, matmul_param, &correct, &output_scale, &output_zp);
lite::Context *ctx = new lite::Context;
ctx->threadNum = 2;
ctx->thread_num_ = 2;
kernel::FullconnectionInt8CPUKernel *fc =
new kernel::FullconnectionInt8CPUKernel(reinterpret_cast<OpParameter *>(matmul_param), inputs_, outputs_, ctx);
......
......@@ -62,7 +62,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest1) {
outputs_tensor.emplace_back(&output_tensor);
lite::Context ctx;
ctx.threadNum = 3;
ctx.thread_num_ = 3;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_QuantDTypeCast};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......@@ -108,7 +108,7 @@ TEST_F(QuantDTypeCastTestFp32, QuantDTypeCastTest2) {
outputs_tensor.emplace_back(&output_tensor);
lite::Context ctx;
ctx.threadNum = 3;
ctx.thread_num_ = 3;
kernel::KernelKey desc = {kernel::KERNEL_ARCH::kCPU, kNumberTypeInt8, schema::PrimitiveType_QuantDTypeCast};
auto creator = lite::KernelRegistry::GetInstance()->GetCreator(desc);
ASSERT_NE(creator, nullptr);
......
......@@ -350,19 +350,19 @@ int Benchmark::RunBenchmark(const std::string &deviceType) {
auto model = lite::Model::Import(graphBuf, size);
auto context = new lite::Context;
if (_flags->device == "CPU") {
context->deviceCtx.type = lite::DT_CPU;
context->device_ctx_.type = lite::DT_CPU;
} else {
context->deviceCtx.type = lite::DT_NPU;
context->device_ctx_.type = lite::DT_NPU;
}
if (_flags->cpuBindMode == -1) {
context->cpuBindMode = MID_CPU;
context->cpu_bind_mode_ = MID_CPU;
} else if (_flags->cpuBindMode == 0) {
context->cpuBindMode = HIGHER_CPU;
context->cpu_bind_mode_ = HIGHER_CPU;
} else {
context->cpuBindMode = NO_BIND;
context->cpu_bind_mode_ = NO_BIND;
}
context->threadNum = _flags->numThreads;
context->thread_num_ = _flags->numThreads;
session = session::LiteSession::CreateSession(context);
auto ret = session->CompileGraph(model.get());
if (ret != RET_OK) {
......
......@@ -898,9 +898,9 @@ STATUS PostTrainingQuantizer::DoQuantize(FuncGraphPtr funcGraph) {
auto model = lite::Model::Import(content, size);
Context ctx;
ctx.deviceCtx.type = DT_CPU;
ctx.threadNum = calibrator_->GetThreadNum();
ctx.cpuBindMode = MID_CPU;
ctx.device_ctx_.type = DT_CPU;
ctx.thread_num_ = calibrator_->GetThreadNum();
ctx.cpu_bind_mode_ = MID_CPU;
session_ = dynamic_cast<mindspore::lite::LiteSession *>(session::LiteSession::CreateSession(&ctx));
if (session_ == nullptr) {
......