提交 2766ab00 编写于 作者: Y yaokun01

add unit test

上级 bf34a8b8
...@@ -94,6 +94,7 @@ static const std::string G_OP_TYPE_SPLIT = "split"; ...@@ -94,6 +94,7 @@ static const std::string G_OP_TYPE_SPLIT = "split";
static const std::string G_OP_TYPE_FEED = "feed"; static const std::string G_OP_TYPE_FEED = "feed";
static const std::string G_OP_TYPE_FETCH = "fetch"; static const std::string G_OP_TYPE_FETCH = "fetch";
static const std::string G_OP_TYPE_DEPTHWISE_CONV = "depthwise_conv2d"; static const std::string G_OP_TYPE_DEPTHWISE_CONV = "depthwise_conv2d";
static const std::string G_OP_TYPE_IM2SEQUENCE = "im2sequence";
static std::unordered_map< static std::unordered_map<
std::string, std::pair<std::vector<std::string>, std::vector<std::string>>> std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
...@@ -115,5 +116,6 @@ static std::unordered_map< ...@@ -115,5 +116,6 @@ static std::unordered_map<
{{"PriorBox", "PriorBoxVar", "TargetBox"}, {"OutputBox"}}}, {{"PriorBox", "PriorBoxVar", "TargetBox"}, {"OutputBox"}}},
{G_OP_TYPE_PRIOR_BOX, {{"Image", "Input"}, {"Boxes", "Variances"}}}, {G_OP_TYPE_PRIOR_BOX, {{"Image", "Input"}, {"Boxes", "Variances"}}},
{G_OP_TYPE_MULTICLASS_NMS, {{"BBoxes", "Scores"}, {"Out"}}}, {G_OP_TYPE_MULTICLASS_NMS, {{"BBoxes", "Scores"}, {"Out"}}},
{G_OP_TYPE_RESHAPE, {{"X"}, {"Out"}}}}; {G_OP_TYPE_RESHAPE, {{"X"}, {"Out"}}},
{G_OP_TYPE_IM2SEQUENCE, {{"X"}, {"Out"}}}};
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -26,7 +26,7 @@ int Im2SequenceOutputSize(int input_size, int kernel, int padding_1, int padding ...@@ -26,7 +26,7 @@ int Im2SequenceOutputSize(int input_size, int kernel, int padding_1, int padding
template <typename Dtype, typename T> template <typename Dtype, typename T>
void Im2SequenceOp<Dtype, T>::InferShape() const { void Im2SequenceOp<Dtype, T>::InferShape() const {
auto in_dims = param_.Input()->dims(); auto in_x_dims = param_.Input()->dims();
const std::vector<int> &kernels = param_.Kernels(); const std::vector<int> &kernels = param_.Kernels();
...@@ -34,9 +34,9 @@ void Im2SequenceOp<Dtype, T>::InferShape() const { ...@@ -34,9 +34,9 @@ void Im2SequenceOp<Dtype, T>::InferShape() const {
std::vector<int> paddings = param_.Paddings(); std::vector<int> paddings = param_.Paddings();
std::vector<int64_t> output_shape({in_dims[0], in_dims[0]}); std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < strides.size(); ++i) { for (size_t i = 0; i < strides.size(); ++i) {
output_shape.push_back(Im2SequenceOutputSize(in_dims[i + 2], kernels[i], output_shape.push_back(Im2SequenceOutputSize(in_x_dims[i + 2], kernels[i],
paddings[i], paddings[i + 2], paddings[i], paddings[i + 2],
strides[i])); strides[i]));
} }
......
...@@ -39,7 +39,7 @@ class Im2SequenceOp : public framework::OperatorWithKernel<DeviceType> { ...@@ -39,7 +39,7 @@ class Im2SequenceOp : public framework::OperatorWithKernel<DeviceType> {
void RunImpl() const { void RunImpl() const {
operators::Im2SequenceKernel<DeviceType, T> kernel; operators::Im2SequenceKernel<DeviceType, T> kernel;
kernel.Compute(param_); kernel.Compute(param_);
this->ClearVariables({"Input"}); this->ClearVariables({"X"});
} }
private: private:
......
...@@ -38,21 +38,19 @@ inline int Im2SeqOutputSize(int input_size, int filter_size, int padding_0, ...@@ -38,21 +38,19 @@ inline int Im2SeqOutputSize(int input_size, int filter_size, int padding_0,
template <> template <>
void Im2SequenceKernel<CPU, float>::Compute(const Im2SequenceParam &param) const { void Im2SequenceKernel<CPU, float>::Compute(const Im2SequenceParam &param) const {
//LOG(kLOG_DEBUG) << param; const Tensor *in_x = param.Input();
Tensor *out = param.Output();
const Tensor *input = param.Input(); out->mutable_data<float>();
Tensor *output = param.Output();
output->mutable_data<float>();
std::vector<int> kernels = param.Kernels(); std::vector<int> kernels = param.Kernels();
std::vector<int> strides = param.Strides(); std::vector<int> strides = param.Strides();
std::vector<int> paddings = param.Paddings(); std::vector<int> paddings = param.Paddings();
auto in_dim = input->dims(); auto in_x_dim = in_x->dims();
const int batch_size = static_cast<int>(in_dim[0]); const int batch_size = static_cast<int>(in_x_dim[0]);
const int img_channels = static_cast<int>(in_dim[1]); const int img_channels = static_cast<int>(in_x_dim[1]);
const int img_height = static_cast<int>(in_dim[2]); const int img_height = static_cast<int>(in_x_dim[2]);
const int img_width = static_cast<int>(in_dim[3]); const int img_width = static_cast<int>(in_x_dim[3]);
int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0],
paddings[2], strides[0]); paddings[2], strides[0]);
...@@ -61,19 +59,19 @@ void Im2SequenceKernel<CPU, float>::Compute(const Im2SequenceParam &param) const ...@@ -61,19 +59,19 @@ void Im2SequenceKernel<CPU, float>::Compute(const Im2SequenceParam &param) const
const std::vector<int> dilations({1, 1}); const std::vector<int> dilations({1, 1});
//TODO: verify //TODO: verify
auto out_dims = output->dims(); auto out_dims = out->dims();
output->Resize({batch_size, output->numel() / batch_size}); out->Resize({batch_size, out->numel() / batch_size});
for (int i = 0; i < batch_size; i++) { for (int i = 0; i < batch_size; i++) {
const Tensor src = const Tensor src =
input->Slice(i, i + 1).Resize({img_channels, img_height, img_width}); in_x->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
Tensor dst = output->Slice(i, i + 1).Resize( Tensor dst = out->Slice(i, i + 1).Resize(
{output_height, output_width, img_channels, kernels[0], kernels[1]}); {output_height, output_width, img_channels, kernels[0], kernels[1]});
math::Im2ColFunctor<math::ColFormat::kOCF, CPU, float> f; math::Im2ColFunctor<math::ColFormat::kOCF, CPU, float> f;
f(src, dilations, strides, paddings, &dst); f(src, dilations, strides, paddings, &dst);
} }
output->Resize(out_dims); out->Resize(out_dims);
} }
template class Im2SequenceKernel<CPU, float>; template class Im2SequenceKernel<CPU, float>;
......
...@@ -756,16 +756,16 @@ class Im2SequenceParam : public OpParam { ...@@ -756,16 +756,16 @@ class Im2SequenceParam : public OpParam {
public: public:
Im2SequenceParam(const VariableNameMap &inputs, const VariableNameMap &outputs, Im2SequenceParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const AttributeMap &attrs, const Scope &scope) { const AttributeMap &attrs, const Scope &scope) {
input_ = InputFrom<LoDTensor>(inputs, scope); input_x_ = InputXFrom<LoDTensor>(inputs, scope);
output_ = OutputFrom<LoDTensor>(outputs, scope); out_ = OutFrom<LoDTensor>(outputs, scope);
kernels_ = GetAttr<vector<int>>("kernels", attrs); kernels_ = GetAttr<vector<int>>("kernels", attrs);
strides_ = GetAttr<vector<int>>("strides", attrs); strides_ = GetAttr<vector<int>>("strides", attrs);
paddings_ = GetAttr<vector<int>>("paddings", attrs); paddings_ = GetAttr<vector<int>>("paddings", attrs);
} }
const Tensor *Input() const { return input_; } const Tensor *Input() const { return input_x_; }
Tensor *Output() const { return output_; } Tensor *Output() const { return out_; }
const vector<int> &Kernels() const { return kernels_; } const vector<int> &Kernels() const { return kernels_; }
...@@ -774,8 +774,8 @@ class Im2SequenceParam : public OpParam { ...@@ -774,8 +774,8 @@ class Im2SequenceParam : public OpParam {
const vector<int> &Paddings() const { return paddings_; } const vector<int> &Paddings() const { return paddings_; }
private: private:
Tensor *input_; Tensor *input_x_;
Tensor *output_; Tensor *out_;
vector<int> kernels_; vector<int> kernels_;
vector<int> strides_; vector<int> strides_;
vector<int> paddings_; vector<int> paddings_;
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../executor_for_test.h"
#include "../test_include.h"
#include "operators/im2sequence_op.h"
int main() {
  // Load the OCR-recognition model, which contains an im2sequence op.
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(g_ocr_recg);
  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                        "program file read fail");

  // Build an executor bound to the op under test.
  // FIX: the original instantiated Executor4Test with ReluOp (copy-paste
  // error from the relu test); the template op must match the
  // "im2sequence" op type being executed.
  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::Im2SequenceOp<paddle_mobile::CPU,
                                                        float>>
      executor(program, "im2sequence");

  // 1. input_tensors: one 2x2x3x3 input with values drawn from [-1, 1).
  vector<Tensor> input_tensors;

  Tensor input1;
  auto input1_data = CreateInput<float>(&input1, {2, 2, 3, 3}, -1, 1);
  input_tensors.push_back(input1);

  // 2. input_names: variable name of the op's input in the program desc.
  vector<string> input_names({
      "conv2d_19.tmp_1",
  });

  // 3. output_names: variable name of the op's output in the program desc.
  vector<string> output_names({"im2sequence_0.tmp_0"});

  // 4. out_dims: expected output shape {8, 9} for this input.
  //    NOTE(review): assumes the model's kernels/strides/paddings yield
  //    8 rows of 9 values for a 2x2x3x3 input — confirm against the
  //    op attributes in g_ocr_recg.
  vector<DDim> out_ddims;
  auto out_ddim = paddle_mobile::framework::make_ddim({8, 9});
  out_ddims.push_back(out_ddim);

  auto output = executor.Predict<LoDTensor>(input_tensors, input_names,
                                            output_names, out_ddims);

  auto output0_data = output[0]->data<float>();

  // Dump inputs and outputs for manual inspection of the kernel result.
  for (int j = 0; j < input_tensors[0].numel(); ++j) {
    DLOG << " value of input: " << input1_data[j];
  }

  for (int j = 0; j < output[0]->numel(); ++j) {
    DLOG << " value of output: " << output0_data[j];
  }
  return 0;
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册