提交 e0bfe465 编写于 作者: H hjchen2

Support parsing anchor_generator/proposals/psroi_pool operators

上级 a2164eb7
...@@ -104,6 +104,11 @@ const char *G_OP_TYPE_SEQUENCE_EXPAND = "sequence_expand"; ...@@ -104,6 +104,11 @@ const char *G_OP_TYPE_SEQUENCE_EXPAND = "sequence_expand";
const char *G_OP_TYPE_SEQUENCE_POOL = "sequence_pool"; const char *G_OP_TYPE_SEQUENCE_POOL = "sequence_pool";
const char *G_OP_TYPE_SEQUENCE_SOFTMAX = "sequence_softmax"; const char *G_OP_TYPE_SEQUENCE_SOFTMAX = "sequence_softmax";
const char *G_OP_TYPE_SLICE = "slice";
const char *G_OP_TYPE_ANCHOR_GENERATOR = "anchor_generator";
const char *G_OP_TYPE_GENERATE_PROPOSALS = "generate_proposals";
const char *G_OP_TYPE_PSROI_POOL = "psroi_pool";
std::unordered_map< std::unordered_map<
std::string, std::pair<std::vector<std::string>, std::vector<std::string>>> std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
op_input_output_key = { op_input_output_key = {
...@@ -193,5 +198,11 @@ std::unordered_map< ...@@ -193,5 +198,11 @@ std::unordered_map<
{G_OP_TYPE_LOGICAL_XOR, {{"X", "Y"}, {"Out"}}}, {G_OP_TYPE_LOGICAL_XOR, {{"X", "Y"}, {"Out"}}},
{G_OP_TYPE_LOGICAL_NOT, {{"X"}, {"Out"}}}, {G_OP_TYPE_LOGICAL_NOT, {{"X"}, {"Out"}}},
{G_OP_TYPE_WRITE_TO_ARRAY, {{"X", "I"}, {"Out"}}}, {G_OP_TYPE_WRITE_TO_ARRAY, {{"X", "I"}, {"Out"}}},
{G_OP_TYPE_READ_FROM_ARRAY, {{"X", "I"}, {"Out"}}}}; {G_OP_TYPE_READ_FROM_ARRAY, {{"X", "I"}, {"Out"}}},
{G_OP_TYPE_SLICE, {{"Input"}, {"Out"}}},
{G_OP_TYPE_ANCHOR_GENERATOR, {{"Input"}, {"Anchors", "Variances"}}},
{G_OP_TYPE_GENERATE_PROPOSALS,
{{"Scores", "BboxDeltas", "ImInfo", "Anchors", "Variances"},
{"RpnRois", "RpnRoiProbs"}}},
{G_OP_TYPE_PSROI_POOL, {{"X", "ROIs"}, {"Out"}}}};
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -192,6 +192,11 @@ extern const char *G_OP_TYPE_SEQUENCE_EXPAND; ...@@ -192,6 +192,11 @@ extern const char *G_OP_TYPE_SEQUENCE_EXPAND;
extern const char *G_OP_TYPE_SEQUENCE_POOL; extern const char *G_OP_TYPE_SEQUENCE_POOL;
extern const char *G_OP_TYPE_SEQUENCE_SOFTMAX; extern const char *G_OP_TYPE_SEQUENCE_SOFTMAX;
extern const char *G_OP_TYPE_SLICE;
extern const char *G_OP_TYPE_ANCHOR_GENERATOR;
extern const char *G_OP_TYPE_GENERATE_PROPOSALS;
extern const char *G_OP_TYPE_PSROI_POOL;
extern std::unordered_map< extern std::unordered_map<
std::string, std::pair<std::vector<std::string>, std::vector<std::string>>> std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
op_input_output_key; op_input_output_key;
......
...@@ -306,3 +306,12 @@ LOAD_OP1(write_to_array, CPU); ...@@ -306,3 +306,12 @@ LOAD_OP1(write_to_array, CPU);
#ifdef READ_FROM_ARRAY_OP #ifdef READ_FROM_ARRAY_OP
LOAD_OP1(read_from_array, CPU); LOAD_OP1(read_from_array, CPU);
#endif #endif
#ifdef ANCHOR_GENERATOR_OP
LOAD_OP1(anchor_generator, CPU);
#endif
#ifdef PROPOSAL_OP
LOAD_OP1(generate_proposals, CPU);
#endif
#ifdef PSROI_POOL_OP
LOAD_OP1(psroi_pool, CPU);
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/detection_ops.h"
#include <vector>
namespace paddle_mobile {
namespace operators {
#ifdef ANCHOR_GENERATOR_OP
// Shape inference for anchor_generator: for an NCHW feature map of height H
// and width W, both outputs (anchors and variances) are [H, W, num_anchors, 4]
// where num_anchors is one box per (anchor_size, aspect_ratio) combination.
template <typename DeviceType, typename T>
void AnchorGeneratorOp<DeviceType, T>::InferShape() const {
  const auto &feat_dims = this->param_.input_->dims();
  PADDLE_MOBILE_ENFORCE(feat_dims.size() == 4, "The layout of input is NCHW.");
  const size_t anchors_per_point =
      this->param_.aspect_ratios_.size() * this->param_.anchor_sizes_.size();
  std::vector<int64_t> out_shape = {feat_dims[2], feat_dims[3],
                                    static_cast<int64_t>(anchors_per_point), 4};
  this->param_.output_anchors_->Resize(framework::make_ddim(out_shape));
  this->param_.output_variances_->Resize(framework::make_ddim(out_shape));
}
#endif
#ifdef PROPOSAL_OP
// Shape inference for generate_proposals: the number of surviving proposals
// is only known at run time (after NMS), so the leading dim stays dynamic.
template <typename DeviceType, typename T>
void ProposalOp<DeviceType, T>::InferShape() const {
  const auto &param = this->param_;
  param.rpn_rois_->Resize(framework::make_ddim({-1, 4}));   // [N, box coords]
  param.rpn_probs_->Resize(framework::make_ddim({-1, 1}));  // [N, score]
}
#endif
#ifdef PSROI_POOL_OP
// Shape inference for psroi_pool: one pooled map per input RoI, shaped
// [num_rois, output_channels, pooled_height, pooled_width].
template <typename DeviceType, typename T>
void PSRoiPoolOp<DeviceType, T>::InferShape() const {
  auto out_dims = this->param_.input_x_->dims();  // start from NCHW input dims
  out_dims[0] = this->param_.input_rois_->dims()[0];
  // NOTE(review): output_channels comes from the op attribute; it is presumably
  // input channels / (pooled_height * pooled_width) — confirm against callers.
  out_dims[1] = this->param_.output_channels_;
  out_dims[2] = this->param_.pooled_height_;
  out_dims[3] = this->param_.pooled_width_;
  this->param_.output_->Resize(out_dims);
}
#endif
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
// Register each detection operator with the CPU backend. Every registration is
// compiled in only when the matching *_OP symbol is enabled at build time
// (see the CMake add_definitions for ANCHOR_GENERATOR_OP / PROPOSAL_OP /
// PSROI_POOL_OP).
#ifdef PADDLE_MOBILE_CPU
#ifdef ANCHOR_GENERATOR_OP
REGISTER_OPERATOR_CPU(anchor_generator, ops::AnchorGeneratorOp);
#endif
#ifdef PROPOSAL_OP
REGISTER_OPERATOR_CPU(generate_proposals, ops::ProposalOp);
#endif
#ifdef PSROI_POOL_OP
REGISTER_OPERATOR_CPU(psroi_pool, ops::PSRoiPoolOp);
#endif
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/detection_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef ANCHOR_GENERATOR_OP
// Generates anchor boxes and their variances over the feature-map grid.
DECLARE_OPERATOR(AnchorGenerator, AnchorGeneratorParam, AnchorGeneratorKernel);
#endif

#ifdef PROPOSAL_OP
// generate_proposals: turns anchors plus predicted deltas/scores into RPN
// region proposals (RpnRois / RpnRoiProbs).
DECLARE_OPERATOR(Proposal, ProposalParam, ProposalKernel);
#endif

#ifdef PSROI_POOL_OP
// Position-sensitive RoI pooling over the input feature map and RoIs.
DECLARE_OPERATOR(PSRoiPool, PSRoiPoolParam, PSRoiPoolKernel);
#endif
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef ANCHOR_GENERATOR_OP
#include <cmath>
#include <vector>
#include "operators/kernel/detection_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool AnchorGeneratorKernel<CPU, float>::Init(AnchorGeneratorParam<CPU> *param) {
  // Nothing to pre-compute: anchors depend on the runtime feature-map size,
  // so all the work happens in Compute.
  return true;
}

// Computes one anchor per (aspect_ratio, anchor_size) pair at every feature
// map location, following the semantics of Paddle fluid's anchor_generator
// operator. Output layout is [H, W, num_anchors, 4] with boxes stored as
// (xmin, ymin, xmax, ymax); variances are broadcast to every anchor.
// Replaces the previous empty TODO stub.
template <>
void AnchorGeneratorKernel<CPU, float>::Compute(
    const AnchorGeneratorParam<CPU> &param) {
  const auto &input_dims = param.input_->dims();
  const int feature_height = input_dims[2];
  const int feature_width = input_dims[3];
  const auto &anchor_sizes = param.anchor_sizes_;
  const auto &aspect_ratios = param.aspect_ratios_;
  const auto &variances = param.variances_;
  const float stride_width = param.stride_[0];
  const float stride_height = param.stride_[1];
  const float offset = param.offset_;
  const int num_anchors =
      static_cast<int>(aspect_ratios.size() * anchor_sizes.size());

  float *anchors_ptr = param.output_anchors_->mutable_data<float>();
  float *vars_ptr = param.output_variances_->mutable_data<float>();

  for (int h = 0; h < feature_height; ++h) {
    for (int w = 0; w < feature_width; ++w) {
      // Anchor center for this cell, mapped back to input-image coordinates.
      const float x_ctr = w * stride_width + offset * (stride_width - 1);
      const float y_ctr = h * stride_height + offset * (stride_height - 1);
      float *out = anchors_ptr + (h * feature_width + w) * num_anchors * 4;
      for (const float ratio : aspect_ratios) {
        // Base box covering one stride cell, reshaped to the aspect ratio.
        const float area = stride_width * stride_height;
        const float base_w = std::round(std::sqrt(area / ratio));
        const float base_h = std::round(base_w * ratio);
        for (const float anchor_size : anchor_sizes) {
          const float anchor_width = (anchor_size / stride_width) * base_w;
          const float anchor_height = (anchor_size / stride_height) * base_h;
          out[0] = x_ctr - 0.5f * (anchor_width - 1);
          out[1] = y_ctr - 0.5f * (anchor_height - 1);
          out[2] = x_ctr + 0.5f * (anchor_width - 1);
          out[3] = y_ctr + 0.5f * (anchor_height - 1);
          out += 4;
        }
      }
    }
  }
  // Every anchor shares the same four variance values.
  const int total_anchors = feature_height * feature_width * num_anchors;
  for (int i = 0; i < total_anchors; ++i) {
    for (int j = 0; j < 4; ++j) {
      vars_ptr[i * 4 + j] = variances[j];
    }
  }
}
} // namespace operators
} // namespace paddle_mobile
#endif // ANCHOR_GENERATOR_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PROPOSAL_OP
#include <vector>
#include "operators/kernel/detection_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool ProposalKernel<CPU, float>::Init(ProposalParam<CPU> *param) {
  // No kernel state to pre-compute for the CPU implementation.
  return true;
}

template <>
void ProposalKernel<CPU, float>::Compute(const ProposalParam<CPU> &param) {
  // TODO(hjchen2): generate_proposals compute is not implemented yet — this
  // stub only lets the op be parsed and shape-inferred; running it leaves the
  // output tensors (RpnRois/RpnRoiProbs) unwritten.
}
} // namespace operators
} // namespace paddle_mobile
#endif // PROPOSAL_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef PSROI_POOL_OP
#include <vector>
#include "operators/kernel/detection_kernel.h"
namespace paddle_mobile {
namespace operators {
template <>
bool PSRoiPoolKernel<CPU, float>::Init(PSRoiPoolParam<CPU> *param) {
  // No kernel state to pre-compute for the CPU implementation.
  return true;
}

template <>
void PSRoiPoolKernel<CPU, float>::Compute(const PSRoiPoolParam<CPU> &param) {
  // TODO(hjchen2): psroi_pool compute is not implemented yet — this stub only
  // lets the op be parsed and shape-inferred; running it leaves the output
  // tensor unwritten.
}
} // namespace operators
} // namespace paddle_mobile
#endif // PSROI_POOL_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <vector>
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef ANCHOR_GENERATOR_OP
/**
 * Parameters for the anchor_generator operator: one input feature map
 * ("Input", NCHW) and two outputs ("Anchors", "Variances"), plus the
 * attributes controlling anchor geometry.
 */
template <typename Dtype>
class AnchorGeneratorParam : public OpParam {
 public:
  AnchorGeneratorParam(const VariableNameMap &inputs,
                       const VariableNameMap &outputs,
                       const AttributeMap &attrs, const Scope &scope)
      : input_(OpParam::GetVarValue<framework::Tensor>("Input", inputs, scope)),
        output_anchors_(
            OpParam::GetVarValue<framework::Tensor>("Anchors", outputs, scope)),
        output_variances_(OpParam::GetVarValue<framework::Tensor>(
            "Variances", outputs, scope)),
        anchor_sizes_(
            OpParam::GetAttr<std::vector<float>>("anchor_sizes", attrs)),
        aspect_ratios_(
            OpParam::GetAttr<std::vector<float>>("aspect_ratios", attrs)),
        variances_(OpParam::GetAttr<std::vector<float>>("variances", attrs)),
        stride_(OpParam::GetAttr<std::vector<float>>("stride", attrs)),
        offset_(OpParam::GetAttr<float>("offset", attrs)) {}

 public:
  // input feature map (NCHW)
  framework::Tensor *input_;
  // outputs: anchors and their variances, [H, W, num_anchors, 4]
  framework::Tensor *output_anchors_;
  framework::Tensor *output_variances_;
  std::vector<float> anchor_sizes_;
  std::vector<float> aspect_ratios_;
  std::vector<float> variances_;
  std::vector<float> stride_;  // {stride_w, stride_h} of the feature map
  float offset_;               // anchor center offset within a stride cell
};

DECLARE_KERNEL(AnchorGenerator, AnchorGeneratorParam);
#endif
#ifdef PROPOSAL_OP
/**
 * Parameters for the generate_proposals operator: five inputs (Scores,
 * BboxDeltas, ImInfo, Anchors, Variances), two LoD outputs (RpnRois,
 * RpnRoiProbs) and the NMS/filtering attributes.
 */
template <typename Dtype>
class ProposalParam : public OpParam {
 public:
  ProposalParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
                const AttributeMap &attrs, const Scope &scope)
      : scores_(
            OpParam::GetVarValue<framework::Tensor>("Scores", inputs, scope)),
        bbox_deltas_(OpParam::GetVarValue<framework::Tensor>("BboxDeltas",
                                                             inputs, scope)),
        im_info_(
            OpParam::GetVarValue<framework::Tensor>("ImInfo", inputs, scope)),
        anchors_(
            OpParam::GetVarValue<framework::Tensor>("Anchors", inputs, scope)),
        variances_(
            OpParam::GetVarValue<framework::Tensor>("Variances", inputs, scope)),
        rpn_rois_(OpParam::GetVarValue<framework::LoDTensor>("RpnRois", outputs,
                                                             scope)),
        rpn_probs_(OpParam::GetVarValue<framework::LoDTensor>("RpnRoiProbs",
                                                              outputs, scope)),
        pre_nms_topn_(OpParam::GetAttr<int>("pre_nms_topN", attrs)),
        post_nms_topn_(OpParam::GetAttr<int>("post_nms_topN", attrs)),
        nms_thresh_(OpParam::GetAttr<float>("nms_thresh", attrs)),
        min_size_(OpParam::GetAttr<float>("min_size", attrs)),
        eta_(OpParam::GetAttr<float>("eta", attrs)) {}

 public:
  framework::Tensor *scores_;       // per-anchor objectness scores
  framework::Tensor *bbox_deltas_;  // per-anchor box regression deltas
  framework::Tensor *im_info_;      // image height/width/scale info
  framework::Tensor *anchors_;
  framework::Tensor *variances_;
  framework::LoDTensor *rpn_rois_;   // output proposal boxes
  framework::LoDTensor *rpn_probs_;  // output proposal scores
  int pre_nms_topn_;   // candidates kept before NMS
  int post_nms_topn_;  // proposals kept after NMS
  float nms_thresh_;   // NMS overlap threshold
  float min_size_;     // minimum proposal size to keep
  float eta_;          // presumably the adaptive-NMS decay factor — confirm
};

DECLARE_KERNEL(Proposal, ProposalParam);
#endif
#ifdef PSROI_POOL_OP
/**
 * Parameters for the psroi_pool operator: input feature map "X", LoD input
 * "ROIs", output "Out" and the pooling attributes.
 */
template <typename Dtype>
class PSRoiPoolParam : public OpParam {
 public:
  PSRoiPoolParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
                 const AttributeMap &attrs, const Scope &scope)
      : input_x_(OpParam::GetVarValue<framework::Tensor>("X", inputs, scope)),
        input_rois_(
            OpParam::GetVarValue<framework::LoDTensor>("ROIs", inputs, scope)),
        output_(OpParam::GetVarValue<framework::Tensor>("Out", outputs, scope)),
        output_channels_(OpParam::GetAttr<int>("output_channels", attrs)),
        pooled_height_(OpParam::GetAttr<int>("pooled_height", attrs)),
        pooled_width_(OpParam::GetAttr<int>("pooled_width", attrs)),
        spatial_scale_(OpParam::GetAttr<float>("spatial_scale", attrs)) {}

 public:
  framework::Tensor *input_x_;        // feature map (NCHW)
  framework::LoDTensor *input_rois_;  // regions of interest
  framework::Tensor *output_;
  int output_channels_;  // channels per pooled output map
  int pooled_height_;
  int pooled_width_;
  float spatial_scale_;  // maps RoI coords from image to feature-map scale
};

DECLARE_KERNEL(PSRoiPool, PSRoiPoolParam);
#endif
} // namespace operators
} // namespace paddle_mobile
...@@ -1498,33 +1498,20 @@ class SliceParam : public OpParam { ...@@ -1498,33 +1498,20 @@ class SliceParam : public OpParam {
public: public:
SliceParam(const VariableNameMap &inputs, const VariableNameMap &outputs, SliceParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const AttributeMap &attrs, const Scope &scope) { const AttributeMap &attrs, const Scope &scope) {
input_x_ = InputXFrom<GType>(inputs, scope); input_ = InputFrom<GType>(inputs, scope);
input_shape_ = InputShapeFrom<GType>(inputs, scope); output_ = OutFrom<GType>(outputs, scope);
out_ = OutFrom<GType>(outputs, scope);
axis_ = GetAttr<int>("axis", attrs);
slice_points_ = GetAttr<vector<int>>("slice_points", attrs);
inplace_ = GetAttr<bool>("inplace", attrs);
}
const RType *InputX() const { return input_x_; }
const RType *InputShape() const { return input_shape_; }
RType *Out() const { return out_; }
const int &Axis() const { return axis_; }
const vector<int> &SlicePoints() const { return slice_points_; }
const bool &Inplace() const { return inplace_; } axes_ = GetAttr<std::vector<int>>("axes", attrs);
starts_ = GetAttr<std::vector<int>>("starts", attrs);
ends_ = GetAttr<std::vector<int>>("ends", attrs);
}
private: public:
RType *input_x_; GType *input_;
RType *input_shape_; GType *output_;
RType *out_; std::vector<int> axes_;
int axis_; std::vector<int> starts_;
vector<int> slice_points_; std::vector<int> ends_;
bool inplace_;
}; };
#endif #endif
......
...@@ -228,39 +228,39 @@ int TestAll(const int in_channels, const int in_height, const int in_width, ...@@ -228,39 +228,39 @@ int TestAll(const int in_channels, const int in_height, const int in_width,
std::cerr << "in_channels=" << in_channels << ", in_height=" << in_height std::cerr << "in_channels=" << in_channels << ", in_height=" << in_height
<< ", in_width=" << in_width << ", out_channels=" << out_channels << ", in_width=" << in_width << ", out_channels=" << out_channels
<< ", groups=" << groups << std::endl; << ", groups=" << groups << std::endl;
// kernel = 3, pad = 0, stride = 1 // // kernel = 3, pad = 0, stride = 1
std::cerr << "float, kernel=3, pad=0, stride=1" << std::endl; // std::cerr << "float, kernel=3, pad=0, stride=1" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 0, 1>( // paddle_mobile::TestConvOp<float, float, 3, 0, 1>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 1, stride = 1 // // kernel = 3, pad = 1, stride = 1
std::cerr << "float, kernel=3, pad=1, stride=1" << std::endl; // std::cerr << "float, kernel=3, pad=1, stride=1" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 1, 1>( // paddle_mobile::TestConvOp<float, float, 3, 1, 1>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 2, stride = 1 // // kernel = 3, pad = 2, stride = 1
std::cerr << "float, kernel=3, pad=2, stride=1" << std::endl; // std::cerr << "float, kernel=3, pad=2, stride=1" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 2, 1>( // paddle_mobile::TestConvOp<float, float, 3, 2, 1>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 5, stride = 1 // // kernel = 3, pad = 5, stride = 1
std::cerr << "float, kernel=3, pad=5, stride=1" << std::endl; // std::cerr << "float, kernel=3, pad=5, stride=1" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 5, 1>( // paddle_mobile::TestConvOp<float, float, 3, 5, 1>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
//
// kernel = 3, pad = 0, stride = 2 // // kernel = 3, pad = 0, stride = 2
std::cerr << "float, kernel=3, pad=0, stride=2" << std::endl; // std::cerr << "float, kernel=3, pad=0, stride=2" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 0, 2>( // paddle_mobile::TestConvOp<float, float, 3, 0, 2>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 1, stride = 2 // // kernel = 3, pad = 1, stride = 2
std::cerr << "float, kernel=3, pad=1, stride=2" << std::endl; // std::cerr << "float, kernel=3, pad=1, stride=2" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 1, 2>( // paddle_mobile::TestConvOp<float, float, 3, 1, 2>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 2, stride = 2 // // kernel = 3, pad = 2, stride = 2
std::cerr << "float, kernel=3, pad=2, stride=2" << std::endl; // std::cerr << "float, kernel=3, pad=2, stride=2" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 2, 2>( // paddle_mobile::TestConvOp<float, float, 3, 2, 2>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 5, stride = 2 // // kernel = 3, pad = 5, stride = 2
std::cerr << "float, kernel=3, pad=5, stride=2" << std::endl; // std::cerr << "float, kernel=3, pad=5, stride=2" << std::endl;
paddle_mobile::TestConvOp<float, float, 3, 5, 2>( // paddle_mobile::TestConvOp<float, float, 3, 5, 2>(
in_channels, in_height, in_width, out_channels, groups); // in_channels, in_height, in_width, out_channels, groups);
#ifndef __aarch64__ #ifndef __aarch64__
// kernel = 3, pad = 0, stride = 1 // kernel = 3, pad = 0, stride = 1
......
...@@ -288,6 +288,9 @@ if(NOT FOUND_MATCH) ...@@ -288,6 +288,9 @@ if(NOT FOUND_MATCH)
set(WHILE_OP ON) set(WHILE_OP ON)
set(WRITE_TO_ARRAY_OP ON) set(WRITE_TO_ARRAY_OP ON)
set(READ_FROM_ARRAY_OP ON) set(READ_FROM_ARRAY_OP ON)
set(ANCHOR_GENERATOR_OP ON)
set(PROPOSAL_OP ON)
set(PSROI_POOL_OP ON)
endif() endif()
# option(BATCHNORM_OP "" ON) # option(BATCHNORM_OP "" ON)
...@@ -572,3 +575,13 @@ endif() ...@@ -572,3 +575,13 @@ endif()
if (READ_FROM_ARRAY_OP) if (READ_FROM_ARRAY_OP)
add_definitions(-DREAD_FROM_ARRAY_OP) add_definitions(-DREAD_FROM_ARRAY_OP)
endif() endif()
if (ANCHOR_GENERATOR_OP)
add_definitions(-DANCHOR_GENERATOR_OP)
endif()
if (PROPOSAL_OP)
add_definitions(-DPROPOSAL_OP)
endif()
if (PSROI_POOL_OP)
add_definitions(-DPSROI_POOL_OP)
endif()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册