diff --git a/mace/kernels/deconv_2d.h b/mace/kernels/deconv_2d.h
index fef536853e19b4e975efaf2970293fefb815a565..3eb10f778f4b0fc2375dabe7e8ad0ffb4e85052d 100644
--- a/mace/kernels/deconv_2d.h
+++ b/mace/kernels/deconv_2d.h
@@ -23,12 +23,15 @@
 #include
 
 #include "mace/core/future.h"
-#include "mace/core/runtime/opencl/cl2_header.h"
 #include "mace/core/tensor.h"
 #include "mace/kernels/activation.h"
 #include "mace/kernels/conv_pool_2d_util.h"
 #include "mace/utils/utils.h"
 
+#ifdef MACE_ENABLE_OPENCL
+#include "mace/core/runtime/opencl/cl2_header.h"
+#endif  // MACE_ENABLE_OPENCL
+
 namespace mace {
 namespace kernels {
 
@@ -126,7 +129,6 @@ struct Deconv2dFunctorBase {
 
     const index_t in_height = isNCHW ? input_shape[2] : input_shape[1];
     const index_t in_width = isNCHW ? input_shape[3] : input_shape[2];
-    const index_t in_channels = isNCHW ? input_shape[1] : input_shape[3];
 
     const index_t extended_input_height =
         (in_height - 1) * strides[0] + 1 + padding_size[0];
@@ -168,11 +170,9 @@ struct Deconv2dFunctorBase {
 
     const index_t in_height = isNCHW ? input_shape[2] : input_shape[1];
     const index_t in_width = isNCHW ? input_shape[3] : input_shape[2];
-    const index_t in_channels = isNCHW ? input_shape[1] : input_shape[3];
 
     const index_t out_height = isNCHW ? output_shape[2] : output_shape[1];
     const index_t out_width = isNCHW ? output_shape[3] : output_shape[2];
-    const index_t out_channels = isNCHW ? output_shape[1] : output_shape[3];
 
     const index_t extended_input_height = (in_height - 1) * strides[0] + 1;
     const index_t extended_input_width = (in_width - 1) * strides[1] + 1;
@@ -216,9 +216,9 @@ struct Deconv2dFunctorBase {
   const int *strides_;  // [stride_h, stride_w]
   const Padding padding_type_;
   std::vector paddings_;
+  std::vector output_shape_;
   const ActivationType activation_;
   const float relux_max_limit_;
-  std::vector output_shape_;
 };
 
 template
@@ -228,9 +228,7 @@ struct Deconv2dFunctor : Deconv2dFunctorBase {
                   const std::vector &paddings,
                   const std::vector &output_shape,
                   const ActivationType activation,
-                  const float relux_max_limit,
-                  const bool is_filter_transformed,
-                  ScratchBuffer *scratch)
+                  const float relux_max_limit)
       : Deconv2dFunctorBase(strides,
                             padding_type,
                             paddings,
@@ -243,6 +241,7 @@ struct Deconv2dFunctor : Deconv2dFunctorBase {
                   const Tensor *bias,
                   Tensor *output,
                   StatsFuture *future) {
+    MACE_UNUSED(future);
    MACE_CHECK_NOTNULL(input);
    MACE_CHECK_NOTNULL(filter);
    MACE_CHECK_NOTNULL(output);
@@ -315,6 +314,7 @@ struct Deconv2dFunctor : Deconv2dFunctorBase {
   }
 };
 
+#ifdef MACE_ENABLE_OPENCL
 template
 struct Deconv2dFunctor : Deconv2dFunctorBase {
   Deconv2dFunctor(const int *strides,
@@ -322,9 +322,7 @@ struct Deconv2dFunctor : Deconv2dFunctorBase {
                   const std::vector &paddings,
                   const std::vector &output_shape,
                   const ActivationType activation,
-                  const float relux_max_limit,
-                  const bool is_filter_transformed,
-                  ScratchBuffer *scratch)
+                  const float relux_max_limit)
       : Deconv2dFunctorBase(strides,
                             padding_type,
                             paddings,
@@ -343,6 +341,7 @@ struct Deconv2dFunctor : Deconv2dFunctorBase {
   std::unique_ptr kernel_error_;
   std::vector input_shape_;
 };
+#endif  // MACE_ENABLE_OPENCL
 
 }  // namespace kernels
 }  // namespace mace
diff --git a/mace/ops/deconv_2d.cc b/mace/ops/deconv_2d.cc
index b49776937fd4f94d223b3d42edb85ecd65b0337d..4666296d8bf873c9dce2b855ee97fc92b5301cfc 100644
--- a/mace/ops/deconv_2d.cc
+++ b/mace/ops/deconv_2d.cc
@@ -24,6 +24,7 @@ void Register_Deconv2D(OperatorRegistry *op_registry) {
                         .Build(),
                     Deconv2dOp);
 
+#ifdef MACE_ENABLE_OPENCL
   REGISTER_OPERATOR(op_registry, OpKeyBuilder("Deconv2D")
                         .Device(DeviceType::GPU)
                         .TypeConstraint("T")
@@ -35,6 +36,7 @@ void Register_Deconv2D(OperatorRegistry *op_registry) {
                         .TypeConstraint("T")
                         .Build(),
                     Deconv2dOp);
+#endif  // MACE_ENABLE_OPENCL
 }
 
 }  // namespace ops
diff --git a/mace/ops/deconv_2d.h b/mace/ops/deconv_2d.h
index d20d76d8b93d2a5ec7ba35e5d9fd52ef22928b66..1796655b48f90f9f45355ad10e6ca37b1ae0f378 100644
--- a/mace/ops/deconv_2d.h
+++ b/mace/ops/deconv_2d.h
@@ -34,10 +34,7 @@ class Deconv2dOp : public ConvPool2dOpBase {
                  this->paddings_,
                  OperatorBase::GetRepeatedArgument("output_shape"),
                  kernels::ActivationType::NOOP,
-                 0.0f,
-                 static_cast(OperatorBase::GetSingleArgument(
-                     "is_filter_transformed", false)),
-                 ws->GetScratchBuffer(D)) {}
+                 0.0f) {}
 
   bool Run(StatsFuture *future) override {
     const Tensor *input = this->Input(INPUT);
diff --git a/mace/python/tools/tensor_data.jinja2 b/mace/python/tools/tensor_data.jinja2
index fbb8986c77f294707e4a77248a46efc163ccd8ad..0f00eb3b48afa01888fc834e161b84982928fe94 100644
--- a/mace/python/tools/tensor_data.jinja2
+++ b/mace/python/tools/tensor_data.jinja2
@@ -17,6 +17,7 @@
 #include
 #include
 
+#include "mace/core/macros.h"
 #include "mace/public/mace.h"
 #include "mace/utils/env_time.h"
 #include "mace/utils/logging.h"
@@ -42,6 +43,7 @@ alignas(4) const unsigned char model_data[{{ model_data_size }}] = {
 
 const unsigned char *LoadModelData(const char *model_data_file) {
 {% if embed_model_data %}
+  MACE_UNUSED(model_data_file);
   return model_data;
 {% else %}
   int fd = open(model_data_file, O_RDONLY);
@@ -67,6 +69,8 @@ void UnloadModelData(const unsigned char *model_data) {
   int ret = munmap(const_cast(model_data),
                    {{ model_data_size }});
   MACE_CHECK(ret == 0, "Failed to unmap model data file, error code: ", errno);
+{% else %}
+  MACE_UNUSED(model_data);
 {% endif %}
 }
 
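Two idioms recur throughout this patch: wrapping GPU-only functors and registrations in #ifdef MACE_ENABLE_OPENCL so CPU-only builds never see OpenCL headers or symbols, and marking intentionally ignored parameters with MACE_UNUSED so -Wunused-parameter stays quiet. The standalone sketch below illustrates both in isolation; the macro body and the function names are illustrative assumptions, not the actual contents of mace/core/macros.h or the deconv kernels.

// Minimal sketch of the two patterns used in this patch. The macro definition
// and function names here are assumptions for illustration only.
#include <iostream>

// A typical "unused" macro: evaluate the argument and discard the result,
// which silences -Wunused-parameter without changing behavior.
#define MACE_UNUSED(var) (void)(var)

// CPU path: always compiled. The argument is kept for API symmetry but is not
// consumed on this path, so it is explicitly marked as unused.
void RunCpuDeconv(int future_placeholder) {
  MACE_UNUSED(future_placeholder);
  std::cout << "CPU deconv path" << std::endl;
}

#ifdef MACE_ENABLE_OPENCL
// GPU path: compiled only when the OpenCL runtime is enabled, so builds
// without OpenCL never reference GPU-only code.
void RunGpuDeconv() {
  std::cout << "GPU (OpenCL) deconv path" << std::endl;
}
#endif  // MACE_ENABLE_OPENCL

int main() {
  RunCpuDeconv(0);
#ifdef MACE_ENABLE_OPENCL
  RunGpuDeconv();
#endif  // MACE_ENABLE_OPENCL
  return 0;
}

Compiling with -DMACE_ENABLE_OPENCL pulls in the GPU path; omitting it yields a CPU-only binary with no unused-parameter warnings, which is the build behavior the guarded registrations and MACE_UNUSED calls in the diff are after.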