From 5b35740b272f199f738a89bbdf77007942882647 Mon Sep 17 00:00:00 2001 From: yejianwu Date: Mon, 28 May 2018 10:49:06 +0800 Subject: [PATCH] refactor arg_helper --- mace/core/arg_helper.cc | 136 +++++++----------- mace/core/arg_helper.h | 50 ++----- mace/core/buffer.h | 4 +- mace/core/mace.cc | 4 +- mace/core/net.cc | 12 +- mace/core/net.h | 4 +- mace/core/operator.cc | 4 +- mace/core/operator.h | 24 +--- mace/core/registry.h | 2 +- .../runtime/hexagon/hexagon_control_wrapper.h | 2 +- mace/core/runtime/hexagon/quantize.h | 2 +- mace/core/tensor.h | 4 +- mace/core/workspace.cc | 6 +- mace/core/workspace.h | 2 +- mace/ops/activation.h | 6 +- mace/ops/batch_norm.h | 4 +- mace/ops/batch_to_space.h | 4 +- mace/ops/buffer_to_image.h | 2 +- mace/ops/channel_shuffle.h | 2 +- mace/ops/concat.h | 4 +- mace/ops/conv_2d.h | 8 +- mace/ops/conv_pool_2d_base.h | 8 +- mace/ops/deconv_2d.h | 2 +- mace/ops/depth_to_space.h | 2 +- mace/ops/depthwise_conv2d.h | 6 +- mace/ops/eltwise.h | 6 +- mace/ops/folded_batch_norm.h | 6 +- mace/ops/fully_connected.h | 6 +- mace/ops/image_to_buffer.h | 2 +- mace/ops/local_response_norm.h | 8 +- mace/ops/pad.h | 4 +- mace/ops/pooling.h | 4 +- mace/ops/proposal.h | 16 +-- mace/ops/psroi_align.h | 6 +- mace/ops/reshape.h | 2 +- mace/ops/resize_bilinear.h | 4 +- mace/ops/slice.h | 4 +- mace/ops/space_to_batch.h | 4 +- mace/ops/space_to_depth.h | 4 +- mace/ops/transpose.h | 2 +- mace/ops/winograd_inverse_transform.h | 12 +- mace/ops/winograd_transform.h | 4 +- mace/proto/mace.proto | 11 +- mace/utils/logging.h | 2 +- mace/utils/timer.h | 2 +- mace/utils/utils.h | 12 +- 46 files changed, 183 insertions(+), 242 deletions(-) diff --git a/mace/core/arg_helper.cc b/mace/core/arg_helper.cc index 6fd9b63c..54a9efc9 100644 --- a/mace/core/arg_helper.cc +++ b/mace/core/arg_helper.cc @@ -20,112 +20,80 @@ namespace mace { -ArgumentHelper::ArgumentHelper(const OperatorDef &def) { +ProtoArgHelper::ProtoArgHelper(const OperatorDef &def) { for (auto &arg : def.arg()) 
{ - if (arg_map_.find(arg.name()) != arg_map_.end()) { - LOG(WARNING) << "Duplicated argument name found in operator def: " - << def.name() << " " << arg.name(); + if (arg_map_.count(arg.name())) { + LOG(WARNING) << "Duplicated argument " << arg.name() + << " found in operator " << def.name(); } - arg_map_[arg.name()] = arg; } } -ArgumentHelper::ArgumentHelper(const NetDef &netdef) { +ProtoArgHelper::ProtoArgHelper(const NetDef &netdef) { for (auto &arg : netdef.arg()) { MACE_CHECK(arg_map_.count(arg.name()) == 0, - "Duplicated argument name found in net def."); + "Duplicated argument found in net def."); arg_map_[arg.name()] = arg; } } -bool ArgumentHelper::HasArgument(const std::string &name) const { - return arg_map_.count(name); -} - namespace { -// Helper function to verify that conversion between types won't loose any -// significant bit. template -bool SupportsLosslessConversion(const InputType &value) { +inline bool IsCastLossless(const InputType &value) { return static_cast(static_cast(value)) == value; } } -#define INSTANTIATE_GET_SINGLE_ARGUMENT(T, fieldname, \ - enforce_lossless_conversion) \ - template <> \ - T ArgumentHelper::GetSingleArgument(const std::string &name, \ - const T &default_value) const { \ - if (arg_map_.count(name) == 0) { \ - VLOG(3) << "Using default parameter value " << default_value \ - << " for parameter " << name; \ - return default_value; \ - } \ - MACE_CHECK(arg_map_.at(name).has_##fieldname(), "Argument ", name, \ - " does not have the right field: expected field " #fieldname); \ - auto value = arg_map_.at(name).fieldname(); \ - if (enforce_lossless_conversion) { \ - auto supportsConversion = \ - SupportsLosslessConversion(value); \ - MACE_CHECK(supportsConversion, "Value", value, " of argument ", name, \ - "cannot be represented correctly in a target type"); \ - } \ - return value; \ - } \ - template <> \ - bool ArgumentHelper::HasSingleArgumentOfType( \ - const std::string &name) const { \ - if (arg_map_.count(name) == 0) { 
\ - return false; \ - } \ - return arg_map_.at(name).has_##fieldname(); \ +#define MACE_GET_OPTIONAL_ARGUMENT_FUNC(T, fieldname, lossless_conversion) \ + template <> \ + T ProtoArgHelper::GetOptionalArg(const std::string &arg_name, \ + const T &default_value) const { \ + if (arg_map_.count(arg_name) == 0) { \ + VLOG(3) << "Using default parameter " << default_value << " for " \ + << arg_name; \ + return default_value; \ + } \ + MACE_CHECK(arg_map_.at(arg_name).has_##fieldname(), "Argument ", arg_name, \ + " not found!"); \ + auto value = arg_map_.at(arg_name).fieldname(); \ + if (lossless_conversion) { \ + const bool castLossless = IsCastLossless(value); \ + MACE_CHECK(castLossless, "Value", value, " of argument ", arg_name, \ + "cannot be casted losslessly to a target type"); \ + } \ + return value; \ } -INSTANTIATE_GET_SINGLE_ARGUMENT(float, f, false) -INSTANTIATE_GET_SINGLE_ARGUMENT(double, f, false) -INSTANTIATE_GET_SINGLE_ARGUMENT(bool, i, false) -INSTANTIATE_GET_SINGLE_ARGUMENT(int8_t, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(int16_t, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(int, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(int64_t, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(uint8_t, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(uint16_t, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(size_t, i, true) -INSTANTIATE_GET_SINGLE_ARGUMENT(std::string, s, false) -#undef INSTANTIATE_GET_SINGLE_ARGUMENT +MACE_GET_OPTIONAL_ARGUMENT_FUNC(float, f, false) +MACE_GET_OPTIONAL_ARGUMENT_FUNC(bool, i, false) +MACE_GET_OPTIONAL_ARGUMENT_FUNC(int, i, true) +MACE_GET_OPTIONAL_ARGUMENT_FUNC(std::string, s, false) +#undef MACE_GET_OPTIONAL_ARGUMENT_FUNC -#define INSTANTIATE_GET_REPEATED_ARGUMENT(T, fieldname, \ - enforce_lossless_conversion) \ - template <> \ - std::vector ArgumentHelper::GetRepeatedArgument( \ - const std::string &name, const std::vector &default_value) const { \ - if (arg_map_.count(name) == 0) { \ - return default_value; \ - } \ - std::vector values; \ - for (const auto 
&v : arg_map_.at(name).fieldname()) { \ - if (enforce_lossless_conversion) { \ - auto supportsConversion = \ - SupportsLosslessConversion(v); \ - MACE_CHECK(supportsConversion, "Value", v, " of argument ", name, \ - "cannot be represented correctly in a target type"); \ - } \ - values.push_back(v); \ - } \ - return values; \ +#define MACE_GET_REPEATED_ARGUMENT_FUNC(T, fieldname, lossless_conversion) \ + template <> \ + std::vector ProtoArgHelper::GetRepeatedArgs( \ + const std::string &arg_name, const std::vector &default_value) \ + const { \ + if (arg_map_.count(arg_name) == 0) { \ + return default_value; \ + } \ + std::vector values; \ + for (const auto &v : arg_map_.at(arg_name).fieldname()) { \ + if (lossless_conversion) { \ + const bool castLossless = IsCastLossless(v); \ + MACE_CHECK(castLossless, "Value", v, " of argument ", arg_name, \ + "cannot be casted losslessly to a target type"); \ + } \ + values.push_back(v); \ + } \ + return values; \ } -INSTANTIATE_GET_REPEATED_ARGUMENT(float, floats, false) -INSTANTIATE_GET_REPEATED_ARGUMENT(double, floats, false) -INSTANTIATE_GET_REPEATED_ARGUMENT(bool, ints, false) -INSTANTIATE_GET_REPEATED_ARGUMENT(int8_t, ints, true) -INSTANTIATE_GET_REPEATED_ARGUMENT(int16_t, ints, true) -INSTANTIATE_GET_REPEATED_ARGUMENT(int, ints, true) -INSTANTIATE_GET_REPEATED_ARGUMENT(int64_t, ints, true) -INSTANTIATE_GET_REPEATED_ARGUMENT(uint8_t, ints, true) -INSTANTIATE_GET_REPEATED_ARGUMENT(uint16_t, ints, true) -INSTANTIATE_GET_REPEATED_ARGUMENT(size_t, ints, true) -#undef INSTANTIATE_GET_REPEATED_ARGUMENT +MACE_GET_REPEATED_ARGUMENT_FUNC(float, floats, false) +MACE_GET_REPEATED_ARGUMENT_FUNC(int, ints, true) +MACE_GET_REPEATED_ARGUMENT_FUNC(int64_t, ints, true) +#undef MACE_GET_REPEATED_ARGUMENT_FUNC } // namespace mace diff --git a/mace/core/arg_helper.h b/mace/core/arg_helper.h index afbd3b33..3e1cca93 100644 --- a/mace/core/arg_helper.h +++ b/mace/core/arg_helper.h @@ -15,61 +15,41 @@ #ifndef MACE_CORE_ARG_HELPER_H_ #define 
MACE_CORE_ARG_HELPER_H_ +#include #include #include -#include #include "mace/proto/mace.pb.h" #include "mace/public/mace.h" namespace mace { -/** - * @brief A helper class to index into arguments. - * - * This helper helps us to more easily index into a set of arguments - * that are present in the operator. To save memory, the argument helper - * does not copy the operator def, so one would need to make sure that the - * lifetime of the OperatorDef object outlives that of the ArgumentHelper. - */ -class ArgumentHelper { +// Refer to caffe2 +class ProtoArgHelper { public: - template - static bool HasArgument(const Def &def, const std::string &name) { - return ArgumentHelper(def).HasArgument(name); - } - template - static T GetSingleArgument(const Def &def, - const std::string &name, - const T &default_value) { - return ArgumentHelper(def).GetSingleArgument(name, default_value); + static T GetOptionalArg(const Def &def, + const std::string &arg_name, + const T &default_value) { + return ProtoArgHelper(def).GetOptionalArg(arg_name, default_value); } template - static bool HasSingleArgumentOfType(const Def &def, const std::string &name) { - return ArgumentHelper(def).HasSingleArgumentOfType(name); - } - - template - static std::vector GetRepeatedArgument( + static std::vector GetRepeatedArgs( const Def &def, - const std::string &name, + const std::string &arg_name, const std::vector &default_value = std::vector()) { - return ArgumentHelper(def).GetRepeatedArgument(name, default_value); + return ProtoArgHelper(def).GetRepeatedArgs(arg_name, default_value); } - explicit ArgumentHelper(const OperatorDef &def); - explicit ArgumentHelper(const NetDef &netdef); - bool HasArgument(const std::string &name) const; + explicit ProtoArgHelper(const OperatorDef &def); + explicit ProtoArgHelper(const NetDef &netdef); template - T GetSingleArgument(const std::string &name, const T &default_value) const; - template - bool HasSingleArgumentOfType(const std::string &name) const; + T 
GetOptionalArg(const std::string &arg_name, const T &default_value) const; template - std::vector GetRepeatedArgument( - const std::string &name, + std::vector GetRepeatedArgs( + const std::string &arg_name, const std::vector &default_value = std::vector()) const; private: diff --git a/mace/core/buffer.h b/mace/core/buffer.h index 8ef5015c..b349cf4b 100644 --- a/mace/core/buffer.h +++ b/mace/core/buffer.h @@ -213,7 +213,7 @@ class Buffer : public BufferBase { void *mapped_buf_; bool is_data_owner_; - DISABLE_COPY_AND_ASSIGN(Buffer); + MACE_DISABLE_COPY_AND_ASSIGN(Buffer); }; class Image : public BufferBase { @@ -330,7 +330,7 @@ class Image : public BufferBase { void *buf_; void *mapped_buf_; - DISABLE_COPY_AND_ASSIGN(Image); + MACE_DISABLE_COPY_AND_ASSIGN(Image); }; class BufferSlice : public BufferBase { diff --git a/mace/core/mace.cc b/mace/core/mace.cc index f16f9f4f..bd834ecf 100644 --- a/mace/core/mace.cc +++ b/mace/core/mace.cc @@ -110,7 +110,7 @@ class MaceEngine::Impl { std::unique_ptr hexagon_controller_; #endif - DISABLE_COPY_AND_ASSIGN(Impl); + MACE_DISABLE_COPY_AND_ASSIGN(Impl); }; MaceEngine::Impl::Impl(DeviceType device_type) @@ -146,7 +146,7 @@ MaceStatus MaceEngine::Impl::Init( hexagon_controller_->SetDebugLevel( static_cast(mace::logging::LogMessage::MinVLogLevel())); int dsp_mode = - ArgumentHelper::GetSingleArgument(*net_def, "dsp_mode", 0); + ProtoArgHelper::GetOptionalArg(*net_def, "dsp_mode", 0); hexagon_controller_->SetGraphMode(dsp_mode); MACE_CHECK(hexagon_controller_->SetupGraph(*net_def, model_data), "hexagon setup graph error"); diff --git a/mace/core/net.cc b/mace/core/net.cc index ccfc4a81..ea4b0721 100644 --- a/mace/core/net.cc +++ b/mace/core/net.cc @@ -42,7 +42,7 @@ SerialNet::SerialNet(const std::shared_ptr op_registry, const auto &operator_def = net_def->op(idx); // TODO(liuqi): refactor based on PB const int op_device = - ArgumentHelper::GetSingleArgument( + ProtoArgHelper::GetOptionalArg( operator_def, "device", 
static_cast(device_type_)); if (op_device == type) { VLOG(3) << "Creating operator " << operator_def.name() << "(" @@ -97,12 +97,12 @@ MaceStatus SerialNet::Run(RunMetadata *run_metadata) { type.compare("FusedConv2D") == 0 || type.compare("DepthwiseConv2d") == 0 || type.compare("Pooling") == 0) { - strides = op->GetRepeatedArgument("strides"); - padding_type = op->GetSingleArgument("padding", -1); - paddings = op->GetRepeatedArgument("padding_values"); - dilations = op->GetRepeatedArgument("dilations"); + strides = op->GetRepeatedArgs("strides"); + padding_type = op->GetOptionalArg("padding", -1); + paddings = op->GetRepeatedArgs("padding_values"); + dilations = op->GetRepeatedArgs("dilations"); if (type.compare("Pooling") == 0) { - kernels = op->GetRepeatedArgument("kernels"); + kernels = op->GetRepeatedArgs("kernels"); } else { kernels = op->Input(1)->shape(); } diff --git a/mace/core/net.h b/mace/core/net.h index efc04d5e..e901188e 100644 --- a/mace/core/net.h +++ b/mace/core/net.h @@ -44,7 +44,7 @@ class NetBase { std::string name_; const std::shared_ptr op_registry_; - DISABLE_COPY_AND_ASSIGN(NetBase); + MACE_DISABLE_COPY_AND_ASSIGN(NetBase); }; class SerialNet : public NetBase { @@ -61,7 +61,7 @@ class SerialNet : public NetBase { std::vector > operators_; DeviceType device_type_; - DISABLE_COPY_AND_ASSIGN(SerialNet); + MACE_DISABLE_COPY_AND_ASSIGN(SerialNet); }; std::unique_ptr CreateNet( diff --git a/mace/core/operator.cc b/mace/core/operator.cc index 908a934d..25504d5b 100644 --- a/mace/core/operator.cc +++ b/mace/core/operator.cc @@ -55,9 +55,9 @@ std::unique_ptr OperatorRegistry::CreateOperator( Workspace *ws, DeviceType type, const NetMode mode) const { - const int dtype = ArgumentHelper::GetSingleArgument( + const int dtype = ProtoArgHelper::GetOptionalArg( operator_def, "T", static_cast(DT_FLOAT)); - const int op_mode_i = ArgumentHelper::GetSingleArgument( + const int op_mode_i = ProtoArgHelper::GetOptionalArg( operator_def, "mode", 
static_cast(NetMode::NORMAL)); const NetMode op_mode = static_cast(op_mode_i); if (op_mode == mode) { diff --git a/mace/core/operator.h b/mace/core/operator.h index 118279a3..06a20d88 100644 --- a/mace/core/operator.h +++ b/mace/core/operator.h @@ -35,28 +35,18 @@ class OperatorBase { explicit OperatorBase(const OperatorDef &operator_def, Workspace *ws); virtual ~OperatorBase() noexcept {} - inline bool HasArgument(const std::string &name) const { - MACE_CHECK(operator_def_, "operator_def was null!"); - return ArgumentHelper::HasArgument(*operator_def_, name); - } template - inline T GetSingleArgument(const std::string &name, - const T &default_value) const { + inline T GetOptionalArg(const std::string &name, + const T &default_value) const { MACE_CHECK(operator_def_, "operator_def was null!"); - return ArgumentHelper::GetSingleArgument( + return ProtoArgHelper::GetOptionalArg( *operator_def_, name, default_value); } template - inline bool HasSingleArgumentOfType(const std::string &name) const { - MACE_CHECK(operator_def_, "operator_def was null!"); - return ArgumentHelper::HasSingleArgumentOfType( - *operator_def_, name); - } - template - inline std::vector GetRepeatedArgument( + inline std::vector GetRepeatedArgs( const std::string &name, const std::vector &default_value = {}) const { MACE_CHECK(operator_def_, "operator_def was null!"); - return ArgumentHelper::GetRepeatedArgument( + return ProtoArgHelper::GetRepeatedArgs( *operator_def_, name, default_value); } @@ -93,7 +83,7 @@ class OperatorBase { std::vector inputs_; std::vector outputs_; - DISABLE_COPY_AND_ASSIGN(OperatorBase); + MACE_DISABLE_COPY_AND_ASSIGN(OperatorBase); }; template @@ -188,7 +178,7 @@ class OperatorRegistry { private: RegistryType registry_; - DISABLE_COPY_AND_ASSIGN(OperatorRegistry); + MACE_DISABLE_COPY_AND_ASSIGN(OperatorRegistry); }; MACE_DECLARE_REGISTRY(OpRegistry, diff --git a/mace/core/registry.h b/mace/core/registry.h index f2762dab..0cc7ebf5 100644 --- a/mace/core/registry.h +++ 
b/mace/core/registry.h @@ -51,7 +51,7 @@ class Registry { std::map registry_; std::mutex register_mutex_; - DISABLE_COPY_AND_ASSIGN(Registry); + MACE_DISABLE_COPY_AND_ASSIGN(Registry); }; template diff --git a/mace/core/runtime/hexagon/hexagon_control_wrapper.h b/mace/core/runtime/hexagon/hexagon_control_wrapper.h index cfa6661d..4e4d8499 100644 --- a/mace/core/runtime/hexagon/hexagon_control_wrapper.h +++ b/mace/core/runtime/hexagon/hexagon_control_wrapper.h @@ -61,7 +61,7 @@ class HexagonControlWrapper { uint32_t num_inputs_; uint32_t num_outputs_; - DISABLE_COPY_AND_ASSIGN(HexagonControlWrapper); + MACE_DISABLE_COPY_AND_ASSIGN(HexagonControlWrapper); }; } // namespace mace diff --git a/mace/core/runtime/hexagon/quantize.h b/mace/core/runtime/hexagon/quantize.h index 635106f3..23f17077 100644 --- a/mace/core/runtime/hexagon/quantize.h +++ b/mace/core/runtime/hexagon/quantize.h @@ -47,7 +47,7 @@ class Quantizer { float *stepsize, float *recip_stepsize); - DISABLE_COPY_AND_ASSIGN(Quantizer); + MACE_DISABLE_COPY_AND_ASSIGN(Quantizer); }; } // namespace mace diff --git a/mace/core/tensor.h b/mace/core/tensor.h index a97800c2..f2f9763a 100644 --- a/mace/core/tensor.h +++ b/mace/core/tensor.h @@ -348,7 +348,7 @@ class Tensor { const Tensor *tensor_; std::vector mapped_image_pitch_; - DISABLE_COPY_AND_ASSIGN(MappingGuard); + MACE_DISABLE_COPY_AND_ASSIGN(MappingGuard); }; private: @@ -361,7 +361,7 @@ class Tensor { bool is_buffer_owner_; std::string name_; - DISABLE_COPY_AND_ASSIGN(Tensor); + MACE_DISABLE_COPY_AND_ASSIGN(Tensor); }; } // namespace mace diff --git a/mace/core/workspace.cc b/mace/core/workspace.cc index 545ace63..02354d50 100644 --- a/mace/core/workspace.cc +++ b/mace/core/workspace.cc @@ -136,11 +136,11 @@ MaceStatus Workspace::CreateOutputTensorBuffer(const NetDef &net_def, for (auto &op : net_def.op()) { // TODO(liuqi): refactor based on PB const int op_device = - ArgumentHelper::GetSingleArgument( + ProtoArgHelper::GetOptionalArg( op, "device", 
static_cast(device_type)); if (op_device == device_type && !op.mem_id().empty()) { const DataType op_dtype = static_cast( - ArgumentHelper::GetSingleArgument( + ProtoArgHelper::GetOptionalArg( op, "T", static_cast(DT_FLOAT))); if (op_dtype != DataType::DT_INVALID) { dtype = op_dtype; @@ -182,7 +182,7 @@ MaceStatus Workspace::CreateOutputTensorBuffer(const NetDef &net_def, for (auto &op : net_def.op()) { // TODO(liuqi): refactor based on PB const int op_device = - ArgumentHelper::GetSingleArgument( + ProtoArgHelper::GetOptionalArg( op, "device", static_cast(device_type)); if (op_device == device_type && !op.mem_id().empty()) { auto mem_ids = op.mem_id(); diff --git a/mace/core/workspace.h b/mace/core/workspace.h index 7399562d..38e8777b 100644 --- a/mace/core/workspace.h +++ b/mace/core/workspace.h @@ -65,7 +65,7 @@ class Workspace { std::unique_ptr host_scratch_buffer_; - DISABLE_COPY_AND_ASSIGN(Workspace); + MACE_DISABLE_COPY_AND_ASSIGN(Workspace); }; } // namespace mace diff --git a/mace/ops/activation.h b/mace/ops/activation.h index ce148054..e9578e51 100644 --- a/mace/ops/activation.h +++ b/mace/ops/activation.h @@ -29,9 +29,9 @@ class ActivationOp : public Operator { ActivationOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), functor_(kernels::StringToActivationType( - OperatorBase::GetSingleArgument("activation", - "NOOP")), - static_cast(OperatorBase::GetSingleArgument( + OperatorBase::GetOptionalArg("activation", + "NOOP")), + static_cast(OperatorBase::GetOptionalArg( "max_limit", 0.0f))) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/batch_norm.h b/mace/ops/batch_norm.h index 4712353e..966b039f 100644 --- a/mace/ops/batch_norm.h +++ b/mace/ops/batch_norm.h @@ -28,8 +28,8 @@ class BatchNormOp : public Operator { BatchNormOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), functor_(false, kernels::ActivationType::NOOP, 0.0f) { - epsilon_ = 
OperatorBase::GetSingleArgument("epsilon", - static_cast(1e-4)); + epsilon_ = OperatorBase::GetOptionalArg("epsilon", + static_cast(1e-4)); } MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/batch_to_space.h b/mace/ops/batch_to_space.h index 05fc676e..b95d0c33 100644 --- a/mace/ops/batch_to_space.h +++ b/mace/ops/batch_to_space.h @@ -29,8 +29,8 @@ class BatchToSpaceNDOp : public Operator { public: BatchToSpaceNDOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(OperatorBase::GetRepeatedArgument("crops", {0, 0, 0, 0}), - OperatorBase::GetRepeatedArgument("block_shape", {1, 1}), + functor_(OperatorBase::GetRepeatedArgs("crops", {0, 0, 0, 0}), + OperatorBase::GetRepeatedArgs("block_shape", {1, 1}), true) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/buffer_to_image.h b/mace/ops/buffer_to_image.h index 1c32fd14..ae8de696 100644 --- a/mace/ops/buffer_to_image.h +++ b/mace/ops/buffer_to_image.h @@ -31,7 +31,7 @@ class BufferToImageOp : public Operator { const Tensor *input_tensor = this->Input(INPUT); kernels::BufferType type = - static_cast(OperatorBase::GetSingleArgument( + static_cast(OperatorBase::GetOptionalArg( "buffer_type", static_cast(kernels::CONV2D_FILTER))); Tensor *output = this->Output(OUTPUT); diff --git a/mace/ops/channel_shuffle.h b/mace/ops/channel_shuffle.h index 22e1e211..246b286b 100644 --- a/mace/ops/channel_shuffle.h +++ b/mace/ops/channel_shuffle.h @@ -28,7 +28,7 @@ class ChannelShuffleOp : public Operator { public: ChannelShuffleOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), - group_(OperatorBase::GetSingleArgument("group", 1)), + group_(OperatorBase::GetOptionalArg("group", 1)), functor_(this->group_) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/concat.h b/mace/ops/concat.h index b2fcc37b..51477105 100644 --- a/mace/ops/concat.h +++ b/mace/ops/concat.h @@ -28,13 +28,13 @@ class ConcatOp : public Operator { 
public: ConcatOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(OperatorBase::GetSingleArgument("axis", 3)) {} + functor_(OperatorBase::GetOptionalArg("axis", 3)) {} MaceStatus Run(StatsFuture *future) override { MACE_CHECK(this->InputSize() >= 2) << "There must be at least two inputs to concat"; const std::vector input_list = this->Inputs(); - const int32_t concat_axis = OperatorBase::GetSingleArgument("axis", 3); + const int32_t concat_axis = OperatorBase::GetOptionalArg("axis", 3); const int32_t input_dims = input_list[0]->dim_size(); const int32_t axis = concat_axis < 0 ? concat_axis + input_dims : concat_axis; diff --git a/mace/ops/conv_2d.h b/mace/ops/conv_2d.h index 9c353ca0..5e691268 100644 --- a/mace/ops/conv_2d.h +++ b/mace/ops/conv_2d.h @@ -35,10 +35,10 @@ class Conv2dOp : public ConvPool2dOpBase { this->paddings_, this->dilations_.data(), kernels::StringToActivationType( - OperatorBase::GetSingleArgument("activation", - "NOOP")), - OperatorBase::GetSingleArgument("max_limit", 0.0f), - static_cast(OperatorBase::GetSingleArgument( + OperatorBase::GetOptionalArg("activation", + "NOOP")), + OperatorBase::GetOptionalArg("max_limit", 0.0f), + static_cast(OperatorBase::GetOptionalArg( "is_filter_transformed", false)), ws->GetScratchBuffer(D)) {} diff --git a/mace/ops/conv_pool_2d_base.h b/mace/ops/conv_pool_2d_base.h index 4ac5da52..c446782c 100644 --- a/mace/ops/conv_pool_2d_base.h +++ b/mace/ops/conv_pool_2d_base.h @@ -28,12 +28,12 @@ class ConvPool2dOpBase : public Operator { public: ConvPool2dOpBase(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - strides_(OperatorBase::GetRepeatedArgument("strides")), - padding_type_(static_cast(OperatorBase::GetSingleArgument( + strides_(OperatorBase::GetRepeatedArgs("strides")), + padding_type_(static_cast(OperatorBase::GetOptionalArg( "padding", static_cast(SAME)))), - paddings_(OperatorBase::GetRepeatedArgument("padding_values")), + 
paddings_(OperatorBase::GetRepeatedArgs("padding_values")), dilations_( - OperatorBase::GetRepeatedArgument("dilations", {1, 1})) {} + OperatorBase::GetRepeatedArgs("dilations", {1, 1})) {} protected: std::vector strides_; diff --git a/mace/ops/deconv_2d.h b/mace/ops/deconv_2d.h index 33d934e3..c73bea86 100644 --- a/mace/ops/deconv_2d.h +++ b/mace/ops/deconv_2d.h @@ -32,7 +32,7 @@ class Deconv2dOp : public ConvPool2dOpBase { functor_(this->strides_.data(), this->padding_type_, this->paddings_, - OperatorBase::GetRepeatedArgument("output_shape"), + OperatorBase::GetRepeatedArgs("output_shape"), kernels::ActivationType::NOOP, 0.0f) {} diff --git a/mace/ops/depth_to_space.h b/mace/ops/depth_to_space.h index e96ac897..0df9ecbd 100644 --- a/mace/ops/depth_to_space.h +++ b/mace/ops/depth_to_space.h @@ -29,7 +29,7 @@ class DepthToSpaceOp : public Operator { public: DepthToSpaceOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - block_size_(OperatorBase::GetSingleArgument("block_size", 1)), + block_size_(OperatorBase::GetOptionalArg("block_size", 1)), functor_(this->block_size_, true) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/depthwise_conv2d.h b/mace/ops/depthwise_conv2d.h index 37b82720..2d6b1388 100644 --- a/mace/ops/depthwise_conv2d.h +++ b/mace/ops/depthwise_conv2d.h @@ -36,9 +36,9 @@ class DepthwiseConv2dOp : public ConvPool2dOpBase { this->paddings_, this->dilations_.data(), kernels::StringToActivationType( - OperatorBase::GetSingleArgument("activation", - "NOOP")), - OperatorBase::GetSingleArgument("max_limit", 0.0f)) {} + OperatorBase::GetOptionalArg("activation", + "NOOP")), + OperatorBase::GetOptionalArg("max_limit", 0.0f)) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input = this->Input(INPUT); diff --git a/mace/ops/eltwise.h b/mace/ops/eltwise.h index 3c63c080..95c48b78 100644 --- a/mace/ops/eltwise.h +++ b/mace/ops/eltwise.h @@ -27,10 +27,10 @@ class EltwiseOp : public Operator { 
EltwiseOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), functor_(static_cast( - OperatorBase::GetSingleArgument( + OperatorBase::GetOptionalArg( "type", static_cast(kernels::EltwiseType::NONE))), - OperatorBase::GetRepeatedArgument("coeff"), - OperatorBase::GetSingleArgument("x", 1.0)) {} + OperatorBase::GetRepeatedArgs("coeff"), + OperatorBase::GetOptionalArg("x", 1.0)) {} MaceStatus Run(StatsFuture *future) override { const Tensor* input0 = this->Input(0); diff --git a/mace/ops/folded_batch_norm.h b/mace/ops/folded_batch_norm.h index 40e3e113..c9047ef8 100644 --- a/mace/ops/folded_batch_norm.h +++ b/mace/ops/folded_batch_norm.h @@ -30,9 +30,9 @@ class FoldedBatchNormOp : public Operator { : Operator(operator_def, ws), functor_(true, kernels::StringToActivationType( - OperatorBase::GetSingleArgument("activation", - "NOOP")), - OperatorBase::GetSingleArgument("max_limit", 0.0f)) {} + OperatorBase::GetOptionalArg("activation", + "NOOP")), + OperatorBase::GetOptionalArg("max_limit", 0.0f)) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input = this->Input(INPUT); diff --git a/mace/ops/fully_connected.h b/mace/ops/fully_connected.h index 0c21efc3..c2539169 100644 --- a/mace/ops/fully_connected.h +++ b/mace/ops/fully_connected.h @@ -29,9 +29,9 @@ class FullyConnectedOp : public Operator { FullyConnectedOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), functor_(kernels::StringToActivationType( - OperatorBase::GetSingleArgument("activation", - "NOOP")), - OperatorBase::GetSingleArgument("max_limit", 0.0f)) {} + OperatorBase::GetOptionalArg("activation", + "NOOP")), + OperatorBase::GetOptionalArg("max_limit", 0.0f)) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input = this->Input(INPUT); diff --git a/mace/ops/image_to_buffer.h b/mace/ops/image_to_buffer.h index 88265948..1365e1a8 100644 --- a/mace/ops/image_to_buffer.h +++ b/mace/ops/image_to_buffer.h @@ -32,7 +32,7 @@ class 
ImageToBufferOp : public Operator { Tensor *output = this->Output(OUTPUT); kernels::BufferType type = - static_cast(OperatorBase::GetSingleArgument( + static_cast(OperatorBase::GetOptionalArg( "buffer_type", static_cast(kernels::CONV2D_FILTER))); return functor_(input, type, output, future); } diff --git a/mace/ops/local_response_norm.h b/mace/ops/local_response_norm.h index 6938de65..502a3404 100644 --- a/mace/ops/local_response_norm.h +++ b/mace/ops/local_response_norm.h @@ -27,10 +27,10 @@ class LocalResponseNormOp : public Operator { LocalResponseNormOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), functor_() { - depth_radius_ = OperatorBase::GetSingleArgument("depth_radius", 5); - bias_ = OperatorBase::GetSingleArgument("bias", 1.0f); - alpha_ = OperatorBase::GetSingleArgument("alpha", 1.0f); - beta_ = OperatorBase::GetSingleArgument("beta", 0.5f); + depth_radius_ = OperatorBase::GetOptionalArg("depth_radius", 5); + bias_ = OperatorBase::GetOptionalArg("bias", 1.0f); + alpha_ = OperatorBase::GetOptionalArg("alpha", 1.0f); + beta_ = OperatorBase::GetOptionalArg("beta", 0.5f); } MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/pad.h b/mace/ops/pad.h index 843cf6fe..3ab03fd6 100644 --- a/mace/ops/pad.h +++ b/mace/ops/pad.h @@ -28,8 +28,8 @@ class PadOp : public Operator { public: PadOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), - functor_(OperatorBase::GetRepeatedArgument("paddings"), - OperatorBase::GetSingleArgument("constant_value", 0.0)) + functor_(OperatorBase::GetRepeatedArgs("paddings"), + OperatorBase::GetOptionalArg("constant_value", 0.0)) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/pooling.h b/mace/ops/pooling.h index a0f95d08..af46c0cd 100644 --- a/mace/ops/pooling.h +++ b/mace/ops/pooling.h @@ -29,9 +29,9 @@ class PoolingOp : public ConvPool2dOpBase { public: PoolingOp(const OperatorDef &op_def, Workspace *ws) : ConvPool2dOpBase(op_def, 
ws), - kernels_(OperatorBase::GetRepeatedArgument("kernels")), + kernels_(OperatorBase::GetRepeatedArgs("kernels")), pooling_type_( - static_cast(OperatorBase::GetSingleArgument( + static_cast(OperatorBase::GetOptionalArg( "pooling_type", static_cast(AVG)))), functor_(pooling_type_, kernels_.data(), diff --git a/mace/ops/proposal.h b/mace/ops/proposal.h index 1c1b280f..36021add 100644 --- a/mace/ops/proposal.h +++ b/mace/ops/proposal.h @@ -26,14 +26,14 @@ class ProposalOp : public Operator { public: ProposalOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), - functor_(OperatorBase::GetSingleArgument("min_size", 16), - OperatorBase::GetSingleArgument("nms_thresh", 0.7), - OperatorBase::GetSingleArgument("pre_nms_top_n", 6000), - OperatorBase::GetSingleArgument("post_nms_top_n", 300), - OperatorBase::GetSingleArgument("feat_stride", 0), - OperatorBase::GetSingleArgument("base_size", 12), - OperatorBase::GetRepeatedArgument("scales"), - OperatorBase::GetRepeatedArgument("ratios")) {} + functor_(OperatorBase::GetOptionalArg("min_size", 16), + OperatorBase::GetOptionalArg("nms_thresh", 0.7), + OperatorBase::GetOptionalArg("pre_nms_top_n", 6000), + OperatorBase::GetOptionalArg("post_nms_top_n", 300), + OperatorBase::GetOptionalArg("feat_stride", 0), + OperatorBase::GetOptionalArg("base_size", 12), + OperatorBase::GetRepeatedArgs("scales"), + OperatorBase::GetRepeatedArgs("ratios")) {} MaceStatus Run(StatsFuture *future) override { const Tensor *rpn_cls_prob = this->Input(RPN_CLS_PROB); diff --git a/mace/ops/psroi_align.h b/mace/ops/psroi_align.h index 1f60bc30..82bd0c44 100644 --- a/mace/ops/psroi_align.h +++ b/mace/ops/psroi_align.h @@ -26,9 +26,9 @@ class PSROIAlignOp : public Operator { public: PSROIAlignOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), - functor_(OperatorBase::GetSingleArgument("spatial_scale", 0), - OperatorBase::GetSingleArgument("output_dim", 0), - 
OperatorBase::GetSingleArgument("group_size", 0)) {} + functor_(OperatorBase::GetOptionalArg("spatial_scale", 0), + OperatorBase::GetOptionalArg("output_dim", 0), + OperatorBase::GetOptionalArg("group_size", 0)) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input = this->Input(INPUT); diff --git a/mace/ops/reshape.h b/mace/ops/reshape.h index 2d145d09..371e3b40 100644 --- a/mace/ops/reshape.h +++ b/mace/ops/reshape.h @@ -28,7 +28,7 @@ class ReshapeOp : public Operator { public: ReshapeOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - shape_(OperatorBase::GetRepeatedArgument("shape")) {} + shape_(OperatorBase::GetRepeatedArgs("shape")) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input = this->Input(INPUT); diff --git a/mace/ops/resize_bilinear.h b/mace/ops/resize_bilinear.h index 6e186f7d..571009c4 100644 --- a/mace/ops/resize_bilinear.h +++ b/mace/ops/resize_bilinear.h @@ -27,8 +27,8 @@ class ResizeBilinearOp : public Operator { ResizeBilinearOp(const OperatorDef &operator_def, Workspace *ws) : Operator(operator_def, ws), functor_( - OperatorBase::GetRepeatedArgument("size", {-1, -1}), - OperatorBase::GetSingleArgument("align_corners", false)) {} + OperatorBase::GetRepeatedArgs("size", {-1, -1}), + OperatorBase::GetOptionalArg("align_corners", false)) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input = this->Input(0); diff --git a/mace/ops/slice.h b/mace/ops/slice.h index 0dd36b1d..567707ff 100644 --- a/mace/ops/slice.h +++ b/mace/ops/slice.h @@ -28,14 +28,14 @@ class SliceOp : public Operator { public: SliceOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(OperatorBase::GetSingleArgument("axis", 3)) {} + functor_(OperatorBase::GetOptionalArg("axis", 3)) {} MaceStatus Run(StatsFuture *future) override { MACE_CHECK(this->OutputSize() >= 2) << "There must be at least two outputs for slicing"; const Tensor *input = this->Input(INPUT); const std::vector 
output_list = this->Outputs(); - const int32_t slice_axis = OperatorBase::GetSingleArgument("axis", 3); + const int32_t slice_axis = OperatorBase::GetOptionalArg("axis", 3); MACE_CHECK((input->dim(slice_axis) % this->OutputSize()) == 0) << "Outputs do not split input equally."; diff --git a/mace/ops/space_to_batch.h b/mace/ops/space_to_batch.h index a67e868c..ebeb7975 100644 --- a/mace/ops/space_to_batch.h +++ b/mace/ops/space_to_batch.h @@ -30,8 +30,8 @@ class SpaceToBatchNDOp : public Operator { SpaceToBatchNDOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), functor_( - OperatorBase::GetRepeatedArgument("paddings", {0, 0, 0, 0}), - OperatorBase::GetRepeatedArgument("block_shape", {1, 1}), + OperatorBase::GetRepeatedArgs("paddings", {0, 0, 0, 0}), + OperatorBase::GetRepeatedArgs("block_shape", {1, 1}), false) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/space_to_depth.h b/mace/ops/space_to_depth.h index bccf8b07..33c3d1a5 100644 --- a/mace/ops/space_to_depth.h +++ b/mace/ops/space_to_depth.h @@ -29,7 +29,7 @@ class SpaceToDepthOp : public Operator { public: SpaceToDepthOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(OperatorBase::GetSingleArgument("block_size", 1), false) { + functor_(OperatorBase::GetOptionalArg("block_size", 1), false) { } MaceStatus Run(StatsFuture *future) override { @@ -37,7 +37,7 @@ class SpaceToDepthOp : public Operator { Tensor *output = this->Output(OUTPUT); MACE_CHECK(input->dim_size() == 4, "input dim should be 4"); const int block_size = - OperatorBase::GetSingleArgument("block_size", 1); + OperatorBase::GetOptionalArg("block_size", 1); index_t input_height; index_t input_width; index_t input_depth; diff --git a/mace/ops/transpose.h b/mace/ops/transpose.h index 626adbe5..877078d0 100644 --- a/mace/ops/transpose.h +++ b/mace/ops/transpose.h @@ -28,7 +28,7 @@ class TransposeOp : public Operator { public: TransposeOp(const OperatorDef &operator_def, Workspace 
*ws) : Operator(operator_def, ws), - dims_(OperatorBase::GetRepeatedArgument("dims")), + dims_(OperatorBase::GetRepeatedArgs("dims")), functor_(dims_) {} MaceStatus Run(StatsFuture *future) override { diff --git a/mace/ops/winograd_inverse_transform.h b/mace/ops/winograd_inverse_transform.h index dfcc0fd1..71807b31 100644 --- a/mace/ops/winograd_inverse_transform.h +++ b/mace/ops/winograd_inverse_transform.h @@ -30,13 +30,13 @@ class WinogradInverseTransformOp : public Operator { public: WinogradInverseTransformOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(OperatorBase::GetSingleArgument("batch", 1), - OperatorBase::GetSingleArgument("height", 0), - OperatorBase::GetSingleArgument("width", 0), + functor_(OperatorBase::GetOptionalArg("batch", 1), + OperatorBase::GetOptionalArg("height", 0), + OperatorBase::GetOptionalArg("width", 0), kernels::StringToActivationType( - OperatorBase::GetSingleArgument("activation", - "NOOP")), - OperatorBase::GetSingleArgument("max_limit", 0.0f)) {} + OperatorBase::GetOptionalArg("activation", + "NOOP")), + OperatorBase::GetOptionalArg("max_limit", 0.0f)) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input_tensor = this->Input(INPUT); diff --git a/mace/ops/winograd_transform.h b/mace/ops/winograd_transform.h index a5795b12..e9a3afbe 100644 --- a/mace/ops/winograd_transform.h +++ b/mace/ops/winograd_transform.h @@ -28,9 +28,9 @@ class WinogradTransformOp : public Operator { public: WinogradTransformOp(const OperatorDef &op_def, Workspace *ws) : Operator(op_def, ws), - functor_(static_cast(OperatorBase::GetSingleArgument( + functor_(static_cast(OperatorBase::GetOptionalArg( "padding", static_cast(VALID))), - OperatorBase::GetRepeatedArgument("padding_values")) {} + OperatorBase::GetRepeatedArgs("padding_values")) {} MaceStatus Run(StatsFuture *future) override { const Tensor *input_tensor = this->Input(INPUT); diff --git a/mace/proto/mace.proto b/mace/proto/mace.proto index 
d0294f96..08ad648e 100644 --- a/mace/proto/mace.proto +++ b/mace/proto/mace.proto @@ -4,8 +4,11 @@ package mace; option optimize_for = LITE_RUNTIME; +// For better compatibility, +// the mace.proto is adapted from tensorflow and caffe2. + enum NetMode { - INIT = 0; + INIT = 0; NORMAL = 1; } @@ -64,7 +67,7 @@ message OperatorDef { optional uint32 op_id = 101; optional uint32 padding = 102; repeated NodeInput node_input = 103; - repeated int32 out_max_byte_size = 104; // only support 32-bit len + repeated int32 out_max_byte_size = 104; // only support 32-bit len } // for memory optimization @@ -82,14 +85,14 @@ message InputInfo { optional string name = 1; optional int32 node_id = 2; repeated int32 dims = 3; - optional int32 max_byte_size = 4; // only support 32-bit len + optional int32 max_byte_size = 4; // only support 32-bit len optional DataType data_type = 5 [default = DT_FLOAT]; } message OutputInfo { optional string name = 1; optional int32 node_id = 2; repeated int32 dims = 3; - optional int32 max_byte_size = 4; // only support 32-bit len + optional int32 max_byte_size = 4; // only support 32-bit len optional DataType data_type = 5 [default = DT_FLOAT]; } diff --git a/mace/utils/logging.h b/mace/utils/logging.h index 0a56fb65..8f8fe87c 100644 --- a/mace/utils/logging.h +++ b/mace/utils/logging.h @@ -117,7 +117,7 @@ class LatencyLogger { const std::string message_; int64_t start_micros_; - DISABLE_COPY_AND_ASSIGN(LatencyLogger); + MACE_DISABLE_COPY_AND_ASSIGN(LatencyLogger); }; #define MACE_LATENCY_LOGGER(vlog_level, ...) 
\ diff --git a/mace/utils/timer.h b/mace/utils/timer.h index 46b9cd14..d8e4a8f8 100644 --- a/mace/utils/timer.h +++ b/mace/utils/timer.h @@ -58,7 +58,7 @@ class WallClockTimer : public Timer { double stop_micros_; double accumulated_micros_; - DISABLE_COPY_AND_ASSIGN(WallClockTimer); + MACE_DISABLE_COPY_AND_ASSIGN(WallClockTimer); }; } // namespace mace diff --git a/mace/utils/utils.h b/mace/utils/utils.h index 391bd2d9..b7221768 100644 --- a/mace/utils/utils.h +++ b/mace/utils/utils.h @@ -24,11 +24,11 @@ namespace mace { // Disable the copy and assignment operator for a class. -#ifndef DISABLE_COPY_AND_ASSIGN -#define DISABLE_COPY_AND_ASSIGN(classname) \ - private: \ - classname(const classname &) = delete; \ - classname &operator=(const classname &) = delete +#ifndef MACE_DISABLE_COPY_AND_ASSIGN +#define MACE_DISABLE_COPY_AND_ASSIGN(CLASSNAME) \ + private: \ + CLASSNAME(const CLASSNAME &) = delete; \ + CLASSNAME &operator=(const CLASSNAME &) = delete #endif template @@ -132,7 +132,7 @@ inline std::vector Split(const std::string &str, char delims) { } inline bool ReadBinaryFile(std::vector *data, - const std::string &filename) { + const std::string &filename) { std::ifstream ifs(filename, std::ios::in | std::ios::binary); if (!ifs.is_open()) { return false; -- GitLab