Commit 70c05971 authored by wangliu

fix #210: add pool op & pool test; fix #211: add executor for testing ops

Parent b8117214
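This commit adds a PoolOp plus a small Executor4Test harness that runs a single named op from a loaded program. A minimal usage sketch, condensed from the pool test added further down in this diff (the model path and variable names are taken from that test and may differ in other setups):

#include "../framework/executor_for_test.h"
#include "../test_helper.h"
#include "io.h"

int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(std::string("../models/googlenet"));

  // Build an executor that keeps only the first "pool2d" op of the program.
  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::PoolOp<paddle_mobile::CPU, float>>
      executor(program, "pool2d");

  paddle_mobile::framework::Tensor input;
  SetupTensor<float>(&input, {1, 64, 112, 112}, static_cast<float>(0),
                     static_cast<float>(1));

  // Feed the named input variable, run block 0, and read back the named output.
  auto output = executor.predict(input, "conv2d_0.tmp_1", "pool2d_0.tmp_0",
                                 {1, 64, 56, 56});
  return 0;
}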
......@@ -32,26 +32,28 @@ Executor<Dtype>::Executor(const Program<Dtype> p) : program_(p) {
to_predict_program_ = program_.originProgram;
}
const std::vector<std::shared_ptr<BlockDesc>> blocks =
// const std::vector<std::shared_ptr<BlockDesc>> blocks =
to_predict_program_->Blocks();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == "conv2d" && op->Input("Input")[0] == "pixel") {
Attribute strides_attr = op->GetAttrMap().at("strides");
std::vector<int> stride = strides_attr.Get<std::vector<int>>();
for (int k = 0; k < stride.size(); ++k) {
}
std::shared_ptr<operators::ConvOp<Dtype, float>> conv =
std::make_shared<operators::ConvOp<Dtype, float>>(
op->Type(), op->GetInputs(), op->GetOutputs(),
op->GetAttrMap(), program_.scope);
ops_of_block_[*block_desc.get()].push_back(conv);
}
}
}
// for (int i = 0; i < blocks.size(); ++i) {
// std::shared_ptr<BlockDesc> block_desc = blocks[i];
// std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
// for (int j = 0; j < ops.size(); ++j) {
// std::shared_ptr<OpDesc> op = ops[j];
// if (op->Type() == "conv2d" && op->Input("Input")[0] ==
// "pixel") {
// Attribute strides_attr = op->GetAttrMap().at("strides");
// std::vector<int> stride =
// strides_attr.Get<std::vector<int>>(); for (int k = 0; k <
// stride.size(); ++k) {
// }
// std::shared_ptr<operators::ConvOp<Dtype, float>> conv =
// std::make_shared<operators::ConvOp<Dtype, float>>(
// op->Type(), op->GetInputs(), op->GetOutputs(),
// op->GetAttrMap(), program_.scope);
// ops_of_block_[*block_desc.get()].push_back(conv);
// }
// }
// }
}
template <typename Dtype>
......@@ -82,7 +84,6 @@ void Executor<Dtype>::predict(const Tensor &t, int block_id) {
to_predict_program_->Block(block_id);
for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
auto op = ops_of_block_[*to_predict_block.get()][j];
// std::cout << "开始run" << std::endl;
op->Run();
}
}
......
......@@ -36,13 +36,18 @@ namespace framework {
template <typename Dtype> class Executor {
public:
Executor();
Executor(const Program<Dtype> p);
std::shared_ptr<Tensor> predict(Tensor &t);
private:
public:
const framework::Program<Dtype> program_;
std::shared_ptr<ProgramDesc> to_predict_program_;
void predict(const Tensor &t, int block_id);
std::map<framework::BlockDesc,
std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
ops_of_block_;
......
......@@ -60,10 +60,9 @@ template <typename Dtype> class OperatorBase : PaddleMobileObject {
const VariableNameMap &Outputs() const { return outputs_; }
const std::string &Type() const { return type_; }
const AttributeMap &Attrs() const { return attrs_; }
void ClearVariables() const {
void ClearVariables(const std::vector<std::string> &var_names) const {
if (this->scope_) {
this->scope_->EraseVars(this->inputs_.at("Filter"));
this->scope_->EraseVars(this->inputs_.at("Input"));
this->scope_->EraseVars(var_names);
}
}
......
......@@ -194,6 +194,9 @@ Loader<Dtype, P>::Load(const std::string &dirname) {
framework::proto::BlockDesc block = program_desc_proto.blocks()[i];
LOG(kLOG_DEBUG) << "block: " << block.idx();
for (int j = 0; j < block.ops().size(); ++j) {
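// Only dump the first two ops of each block.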
if (j == 2) {
break;
}
framework::proto::OpDesc op = block.ops()[j];
LOG(kLOG_DEBUG1) << "op: " << op.type();
for (int m = 0; m < op.inputs_size(); ++m) {
......
......@@ -40,9 +40,9 @@ class ConvOp : public framework::OperatorWithKernel<DeviceType> {
void InferShape() const override;
void Run() const {
operators::ConvKernel<DeviceType, T, ConvParam> kernel;
operators::ConvKernel<DeviceType, T> kernel;
kernel.Compute(param_);
this->ClearVariables();
this->ClearVariables({"Filter", "Input"});
}
private:
......
......@@ -34,8 +34,7 @@ bool IsExpand(const std::vector<int64_t> &filter_dim,
return !(filter_1 && strides_1 && padding_0 && dilation_1);
}
template <>
void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam &param) const {
template <> void ConvKernel<CPU, float>::Compute(const ConvParam &param) const {
LOG(kLOG_DEBUG) << param;
const Tensor *input = param.Input();
......@@ -149,7 +148,7 @@ void ConvKernel<CPU, float, ConvParam>::Compute(const ConvParam &param) const {
}
}
template class ConvKernel<CPU, float, ConvParam>;
template class ConvKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "common/log.h"
#include <operators/kernel/pool_kernel.h>
namespace paddle_mobile {
namespace operators {
inline void PoolBasic(std::string pooling_type, std::vector<int> ksize,
std::vector<int> strides, std::vector<int> paddings,
const Tensor *in_x, Tensor *out) {
if (pooling_type == "max") {
math::PoolFunctor<CPU, math::MaxPool<float>, float> pool2d_forward;
math::MaxPool<float> pool_process;
pool2d_forward(*in_x, ksize, strides, paddings, pool_process, out);
} else if (pooling_type == "avg") {
math::PoolFunctor<CPU, math::AvgPool<float>, float> pool2d_forward;
math::AvgPool<float> pool_process;
pool2d_forward(*in_x, ksize, strides, paddings, pool_process, out);
}
}
template <> void PoolKernel<CPU, float>::Compute(const PoolParam &param) const {
const Tensor *in_x = param.Input();
Tensor *out = param.Output();
std::string pooling_type = param.PoolingType();
std::vector<int> ksize = param.Ksize();
std::vector<int> strides = param.Strides();
std::vector<int> paddings = param.Paddings();
if (ksize.size() != 2) {
LOG(paddle_mobile::LogLevel::kLOG_ERROR)
<< "Pool op only supports 2D and 3D input.";
}
if (param.isGlobalPooling()) {
for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0;
ksize[i] = static_cast<int>(in_x->dims()[i + 2]);
}
}
PoolBasic(pooling_type, ksize, strides, paddings, in_x, out);
// if (param.isGlobalPooling() || ksize[0] != ksize[1] ||
// strides[0] != strides[1] || strides[1] != 2 ||
// paddings[0] != paddings[1] || paddings[1] > 1) {
// PoolBasic(pooling_type, ksize, strides, paddings, in_x, out);
//
// } else if (ksize[0] == 2) {
//
// } else if (ksize[0] == 3) {
//
// } else {
// PoolBasic(pooling_type, ksize, strides, paddings, in_x, out);
// }
}
} // namespace operators
} // namespace paddle_mobile
......@@ -29,7 +29,7 @@ namespace operators {
using namespace framework;
template <typename DeviceType, typename T, typename P>
template <typename DeviceType, typename T>
class ConvKernel : public framework::OpKernelBase<DeviceType, ConvParam> {
public:
void Compute(const ConvParam &param) const;
......
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "framework/operator.h"
#include "operators/math/pooling.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class PoolKernel : public framework::OpKernelBase<DeviceType, PoolParam> {
public:
void Compute(const PoolParam &param) const;
};
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void Pool3x3Max() {
  // TODO: implement with NEON intrinsics.
}
static void Pool3x3Avg() {
  // TODO: implement with NEON intrinsics.
}
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
static void Pool2x2Max() {
  // TODO: implement with NEON intrinsics.
}
static void Pool2x2Avg() {
  // TODO: implement with NEON intrinsics.
}
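The 2x2 routines above are still stubs. Purely as an illustration (not part of this commit), a single stride-2 max step producing two outputs could use NEON as sketched below, assuming in_row0/in_row1 point at four contiguous columns of the two input rows and out has room for two floats:

#if __ARM_NEON
// Illustrative only: reduce two adjacent 2x2 windows into two output values.
static inline void Pool2x2MaxStep(const float *in_row0, const float *in_row1,
                                  float *out) {
  float32x4_t top = vld1q_f32(in_row0);          // 4 values from the upper row
  float32x4_t bottom = vld1q_f32(in_row1);       // 4 values from the lower row
  float32x4_t col_max = vmaxq_f32(top, bottom);  // per-column max of the two rows
  // Pairwise max folds each 2x2 window (two adjacent columns) into one value.
  float32x2_t window_max =
      vpmax_f32(vget_low_f32(col_max), vget_high_f32(col_max));
  vst1_f32(out, window_max);
}
#endif // __ARM_NEON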
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "pooling.h"
#include <common/types.h>
namespace paddle_mobile {
namespace operators {
namespace math {
/*
* All tensors are in NCHW format.
* Ksize, strides and paddings each hold two elements, representing the
* height and width dimensions, respectively.
*/
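// For example: an NCHW input of shape {1, 1, 4, 4} pooled with ksize {2, 2},
// strides {2, 2} and paddings {0, 0} yields an output of shape {1, 1, 2, 2};
// each output element reduces one non-overlapping 2x2 window of the input.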
template <typename PoolProcess, typename T>
class PoolFunctor<CPU, PoolProcess, T> {
public:
void operator()(const framework::Tensor &input,
const std::vector<int> &ksize,
const std::vector<int> &strides,
const std::vector<int> &paddings, PoolProcess pool_process,
framework::Tensor *output) {
const int batch_size = input.dims()[0];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
if (output == nullptr) {
DLOG << "output tensor is null";
}
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = ksize[0];
const int ksize_width = ksize[1];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int input_stride = input_height * input_width;
const int output_stride = output_height * output_width;
const T *input_data = input.data<T>();
T *output_data = output->mutable_data<T>();
for (int i = 0; i < batch_size; i++) {
#pragma omp parallel for
  for (int c = 0; c < output_channels; ++c) {
    // Index each channel through its own offset so the parallel loop over
    // channels does not race on the shared input_data/output_data pointers.
    const T *channel_input = input_data + c * input_stride;
    T *channel_output = output_data + c * output_stride;
    for (int ph = 0; ph < output_height; ++ph) {
      int hstart = ph * stride_height - padding_height;
      int hend = std::min(hstart + ksize_height, input_height);
      hstart = std::max(hstart, 0);
      for (int pw = 0; pw < output_width; ++pw) {
        int wstart = pw * stride_width - padding_width;
        int wend = std::min(wstart + ksize_width, input_width);
        wstart = std::max(wstart, 0);
        T ele = pool_process.initial();
        for (int h = hstart; h < hend; ++h) {
          for (int w = wstart; w < wend; ++w) {
            pool_process.compute(channel_input[h * input_width + w], &ele);
          }
        }
        int pool_size = (hend - hstart) * (wend - wstart);
        pool_process.finalize(static_cast<T>(pool_size), &ele);
        channel_output[ph * output_width + pw] = ele;
      }
    }
  }
  input_data += output_channels * input_stride;
  output_data += output_channels * output_stride;
}
}
};
template class PoolFunctor<CPU, math::AvgPool<float>, float>;
template class PoolFunctor<CPU, math::MaxPool<float>, float>;
} // namespace math
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/log.h"
#include "framework/tensor.h"
namespace paddle_mobile {
namespace operators {
namespace math {
#define FLT_MAX __FLT_MAX__
/*
* \brief Extracts the elementwise operations used by pooling.
* Both MaxPool and AvgPool need "initial", "compute" and "finalize"
* operations.
* MaxPool initializes its temporary variable to the negative maximum so it
* can find the maximum value in the pooling window.
* AvgPool initializes its temporary variable to zero, accumulates all values
* in the window, and finally takes the average.
* MaxPoolGrad and AvgPoolGrad are the corresponding gradient operations.
*/
template <class T> class MaxPool {
public:
inline T initial() { return static_cast<T>(-FLT_MAX); }
inline void compute(const T &x, T *y) { *y = *y > x ? *y : x; }
inline void finalize(const T &pool_field, T *y) {}
};
template <class T> class AvgPool {
public:
inline T initial() { return static_cast<T>(0); }
inline void compute(const T &x, T *y) { *y += x; }
inline void finalize(const T &pool_field, T *y) { *y /= pool_field; }
};
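// Illustrative trace: pooling the window {3, 7, 5} with MaxPool starts at
// -FLT_MAX, compute() keeps the running maximum (3, then 7, then 7) and
// finalize() is a no-op; with AvgPool it starts at 0, compute() accumulates
// to 15 and finalize(3, ...) divides by the pool field, giving 5.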
template <typename DeviceType, typename PoolProcess, typename T>
class PoolFunctor {
public:
void operator()(const framework::Tensor &input,
const std::vector<int> &ksize,
const std::vector<int> &strides,
const std::vector<int> &paddings, PoolProcess pool_compute,
framework::Tensor *output);
};
} // namespace math
} // namespace operators
} // namespace paddle_mobile
......@@ -16,7 +16,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once;
#pragma once
#include "common/log.h"
#include "common/type_define.h"
......@@ -341,5 +341,48 @@ class BatchNormParam : OpParam {
bool is_test_;
std::string data_format_;
};
class PoolParam : public OpParam {
public:
PoolParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
const framework::Scope &scope) {
input_ = InputXFrom<framework::Tensor>(inputs, scope);
output_ = OutFrom<framework::Tensor>(outputs, scope);
pooling_type_ = GetAttr<std::string>("pooling_type", attrs);
ksize_ = GetAttr<std::vector<int>>("ksize", attrs);
strides_ = GetAttr<std::vector<int>>("strides", attrs);
paddings_ = GetAttr<std::vector<int>>("paddings", attrs);
ceil_mode_ = GetAttr<bool>("ceil_mode", attrs);
global_pooling_ = GetAttr<bool>("global_pooling", attrs);
}
const Tensor *Input() const { return input_; }
Tensor *Output() const { return output_; }
const std::string &PoolingType() const { return pooling_type_; }
const std::vector<int> &Ksize() const { return ksize_; }
const std::vector<int> &Strides() const { return strides_; }
const std::vector<int> &Paddings() const { return paddings_; }
bool isCeilMode() const { return ceil_mode_; }
bool isGlobalPooling() const { return global_pooling_; }
private:
Tensor *input_;
Tensor *output_;
std::string pooling_type_;
std::vector<int> ksize_;
std::vector<int> strides_;
std::vector<int> paddings_;
bool ceil_mode_;
bool global_pooling_ = false;
};
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "pool_op.h"
namespace paddle_mobile {
namespace operators {
int PoolOutputSize(int input_size, int filter_size, int padding, int stride,
bool ceil_mode) {
int output_size;
if (!ceil_mode) {
output_size = (input_size - filter_size + 2 * padding) / stride + 1;
} else {
output_size =
(input_size - filter_size + 2 * padding + stride - 1) / stride + 1;
}
return output_size;
}
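// For example, a 112-wide input pooled with filter_size 2, padding 0,
// stride 2 and ceil_mode false gives (112 - 2 + 2 * 0) / 2 + 1 = 56.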
template <typename DeviceType, typename T>
void PoolOp<DeviceType, T>::InferShape() const {
auto in_x_dims = param_.Input()->dims();
std::vector<int> ksize = param_.Ksize();
std::vector<int> paddings = param_.Paddings();
std::vector<int> strides = param_.Strides();
bool ceil_mode = param_.isCeilMode();
if (param_.isGlobalPooling()) {
ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0;
ksize[i] = static_cast<int>(in_x_dims[i + 2]);
}
}
std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
for (size_t i = 0; i < ksize.size(); ++i) {
output_shape.push_back(PoolOutputSize(
in_x_dims[i + 2], ksize[i], paddings[i], strides[i], ceil_mode));
}
param_.Output()->Resize(framework::make_ddim(output_shape));
DLOG << "infer shape out size =" << param_.Output()->numel();
}
template class PoolOp<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include <framework/operator.h>
#include <operators/kernel/pool_kernel.h>
#include <operators/op_param.h>
namespace paddle_mobile {
namespace operators {
using namespace framework;
template <typename DeviceType, typename T>
class PoolOp : public framework::OperatorWithKernel<DeviceType> {
public:
PoolOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType>(type, inputs, outputs,
attrs, scope),
param_(inputs, outputs, attrs, *scope) {}
using framework::OperatorWithKernel<DeviceType>::OperatorWithKernel;
void InferShape() const override;
void Run() const {
// InferShape();
operators::PoolKernel<DeviceType, T> kernel;
kernel.Compute(param_);
this->ClearVariables({"X"});
}
private:
PoolParam param_;
};
} // namespace operators
} // namespace paddle_mobile
# gen test
ADD_EXECUTABLE(test-conv-op operators/test_cov_op.cpp test_helper.h test_include.h)
ADD_EXECUTABLE(test-conv-op operators/test_cov_op.cpp test_helper.h test_include.h framework/executor_for_test.h framework/executor_for_test.cpp)
target_link_libraries(test-conv-op paddle-mobile)
# gen test
......@@ -32,5 +32,11 @@ ADD_EXECUTABLE(test-load framework/test_load.cpp)
target_link_libraries(test-load paddle-mobile)
# gen test log
# gen test
ADD_EXECUTABLE(test-optimize framework/test_optimize.cpp)
target_link_libraries(test-optimize paddle-mobile)
#gen test
ADD_EXECUTABLE(test-pool operators/test_pool_op.cpp test_helper.h test_include.h framework/executor_for_test.h framework/executor_for_test.cpp)
target_link_libraries(test-pool paddle-mobile)
\ No newline at end of file
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "executor_for_test.h"
template <typename DeviceType, typename OpType>
Executor4Test<DeviceType, OpType>::Executor4Test(const Program<DeviceType> p,
std::string op_type)
: Executor<DeviceType>(p) {
if (this->program_.originProgram == nullptr) {
LOG(paddle_mobile::LogLevel::kLOG_ERROR)
<< "to_predict_program_ == nullptr";
}
const std::vector<std::shared_ptr<BlockDesc>> blocks =
this->to_predict_program_->Blocks();
for (int i = 0; i < blocks.size(); ++i) {
std::shared_ptr<BlockDesc> block_desc = blocks[i];
std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
for (int j = 0; j < ops.size(); ++j) {
std::shared_ptr<OpDesc> op = ops[j];
if (op->Type() == op_type) {
std::shared_ptr<OpType> op_ptr = std::make_shared<OpType>(
op->Type(), op->GetInputs(), op->GetOutputs(),
op->GetAttrMap(), this->program_.scope);
this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
break;
}
}
}
}
template <typename DeviceType, typename OpType>
std::shared_ptr<Tensor>
Executor4Test<DeviceType, OpType>::predict(Tensor &t, std::string input,
std::string output, DDim dDim) {
auto scope = this->program_.scope;
Variable *g_feed_value = scope->Var(input);
auto tensor = g_feed_value->GetMutable<Tensor>();
tensor->ShareDataWith(t);
Variable *con_output = scope->Var(output);
Tensor *output_tensor = con_output->GetMutable<Tensor>();
output_tensor->mutable_data<float>(dDim);
std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
out_tensor.reset(output_tensor);
Executor<DeviceType>::predict(t, 0);
return out_tensor;
}
template class Executor4Test<
paddle_mobile::CPU,
paddle_mobile::operators::ConvOp<paddle_mobile::CPU, float>>;
template class Executor4Test<
paddle_mobile::CPU,
paddle_mobile::operators::PoolOp<paddle_mobile::CPU, float>>;
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/log.h"
#include "framework/executor.h"
#include "operators/conv_op.h"
#include "operators/pool_op.h"
using namespace paddle_mobile::framework;
template <typename DeviceType, typename OpType>
class Executor4Test : public Executor<DeviceType> {
public:
Executor4Test(const Program<DeviceType> p, std::string op_type);
std::shared_ptr<Tensor> predict(Tensor &t, std::string input,
std::string output, DDim dDim);
};
......@@ -16,49 +16,31 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "../framework/executor_for_test.h"
#include "../test_helper.h"
#include "framework/executor.h"
#include "io.h"
int main() {
std::string data_set = "cifar10";
//
// if (data_set == "cifar10") {
// SetupTensor<float>(&input, {FLAGS_batch_size, 3, 32, 32},
// static_cast<float>(0), static_cast<float>(1));
// } else if (data_set == "imagenet") {
// SetupTensor<float>(&input, {FLAGS_batch_size, 3, 224, 224},
// static_cast<float>(0), static_cast<float>(1));
// } else {
// LOG(FATAL) << "Only cifar10 or imagenet is supported.";
// }
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("../../../test/models/googlenet"));
auto program = loader.Load(std::string("../models/googlenet"));
if (program.originProgram == nullptr) {
DLOG << "program file read fail";
}
paddle_mobile::framework::Executor<paddle_mobile::CPU> executor(program);
Executor4Test<paddle_mobile::CPU,
paddle_mobile::operators::ConvOp<paddle_mobile::CPU, float>>
executor(program, "conv2d");
paddle_mobile::framework::Tensor input;
SetupTensor<float>(&input, {1, 3, 32, 32}, static_cast<float>(0),
static_cast<float>(1));
float *input_ptr = input.data<float>();
for (int i = 0; i < input.numel(); ++i) {
// std::cout << input_ptr[i] << std::endl;
}
// std::cout << "input: " << input.memory_size() << std::endl;
// std::cout << "input: " << input.numel() << std::endl;
auto output = executor.predict(input);
// std::cout << "output: " << output->memory_size() << std::endl;
// std::cout << "output: " << output->numel() << std::endl;
// float* output_ptr = output->data<float>();
// for (int j = 0; j < output->numel(); ++j) {
// std::cout << " value of output: " << output_ptr[j] << std::endl;
//
auto output =
executor.predict(input, "data", "conv2d_0.tmp_0", {1, 64, 56, 56});
float *output_ptr = output->data<float>();
for (int j = 0; j < output->numel(); ++j) {
DLOG << " value of output: " << output_ptr[j];
}
return 0;
}
/* Copyright (c) 2016 Baidu, Inc. All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#include "../framework/executor_for_test.h"
#include "../test_helper.h"
#include "io.h"
int main() {
paddle_mobile::Loader<paddle_mobile::CPU> loader;
auto program = loader.Load(std::string("../models/googlenet"));
if (program.originProgram == nullptr) {
DLOG << "program read file";
}
Executor4Test<paddle_mobile::CPU,
paddle_mobile::operators::PoolOp<paddle_mobile::CPU, float>>
executor(program, "pool2d");
paddle_mobile::framework::Tensor input;
SetupTensor<float>(&input, {1, 64, 112, 112}, static_cast<float>(0),
static_cast<float>(1));
auto output = executor.predict(input, "conv2d_0.tmp_1", "pool2d_0.tmp_0",
{1, 64, 56, 56});
float *output_ptr = output->data<float>();
for (int j = 0; j < output->numel(); ++j) {
DLOG << " value of output: " << output_ptr[j];
}
return 0;
}
......@@ -16,6 +16,7 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
==============================================================================*/
#pragma once
#include "common/log.h"
#include "framework/ddim.h"
#include "framework/tensor.h"
#include <random>
......