Commit fb6577bb authored by: Y Yao,kun

code style

Parent abb4bb07
......@@ -17,15 +17,15 @@ limitations under the License. */
 namespace paddle_mobile {
 namespace operators {
-int Im2SequenceOutputSize(int input_size, int kernel, int padding_1, int padding_2,
-                          int stride) {
-  int output_size = 1 + (padding_1 + padding_2 + input_size - kernel + stride - 1) / stride;
+int Im2SequenceOutputSize(int input_size, int kernel, int padding_1,
+                          int padding_2, int stride) {
+  int output_size =
+      1 + (padding_1 + padding_2 + input_size - kernel + stride - 1) / stride;
   return output_size;
 }
 template <typename Dtype, typename T>
 void Im2SequenceOp<Dtype, T>::InferShape() const {
   auto in_x_dims = param_.Input()->dims();
   const std::vector<int> &kernels = param_.Kernels();
......
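For reference: Im2SequenceOutputSize above counts sliding-window positions along one dimension as 1 + ceil((padding_1 + padding_2 + input_size - kernel) / stride); the "+ stride - 1" term implements the ceiling with integer division. A minimal standalone sketch of the same arithmetic, with illustrative values only (not part of this commit):

#include <iostream>

// Restates the arithmetic of Im2SequenceOutputSize, for illustration only.
int OutputSize(int input_size, int kernel, int padding_1, int padding_2,
               int stride) {
  return 1 + (padding_1 + padding_2 + input_size - kernel + stride - 1) / stride;
}

int main() {
  // Example: width 7, kernel 3, no padding, stride 2:
  // 1 + (0 + 0 + 7 - 3 + 2 - 1) / 2 = 1 + 5 / 2 = 3 window positions.
  std::cout << OutputSize(7, 3, 0, 0, 2) << std::endl;  // prints 3
  return 0;
}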
......@@ -14,9 +14,9 @@ limitations under the License. */
 #pragma once
-#include <operators/op_param.h>
 #include "framework/operator.h"
 #include "operators/kernel/im2sequence_kernel.h"
+#include <operators/op_param.h>
 namespace paddle_mobile {
 namespace operators {
......@@ -27,7 +27,8 @@ template <typename DeviceType, typename T>
 class Im2SequenceOp : public framework::OperatorWithKernel<DeviceType> {
  public:
   Im2SequenceOp(const std::string &type, const VariableNameMap &inputs,
-                const VariableNameMap &outputs, const framework::AttributeMap &attrs,
+                const VariableNameMap &outputs,
+                const framework::AttributeMap &attrs,
                 std::shared_ptr<framework::Scope> scope)
       : framework::OperatorWithKernel<DeviceType>(type, inputs, outputs, attrs,
                                                   scope),
......
......@@ -25,7 +25,8 @@ inline int Im2SeqOutputSize(int input_size, int filter_size, int padding_0,
 }
 template <>
-void Im2SequenceKernel<CPU, float>::Compute(const Im2SequenceParam &param) const {
+void Im2SequenceKernel<CPU, float>::Compute(
+    const Im2SequenceParam &param) const {
   const Tensor *in_x = param.Input();
   Tensor *out = param.Output();
   out->mutable_data<float>();
......@@ -46,15 +47,15 @@ void Im2SequenceKernel<CPU, float>::Compute(const Im2SequenceParam &param) const
                                            paddings[3], strides[1]);
   const std::vector<int> dilations({1, 1});
-  //TODO: verify
+  // TODO: verify
   auto out_dims = out->dims();
   out->Resize({batch_size, out->numel() / batch_size});
   for (int i = 0; i < batch_size; i++) {
     const Tensor src =
-        in_x->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
+        in_x->Slice(i, i + 1).Resize({img_channels, img_height, img_width});
     Tensor dst = out->Slice(i, i + 1).Resize(
-        {output_height, output_width, img_channels, kernels[0], kernels[1]});
+        {output_height, output_width, img_channels, kernels[0], kernels[1]});
     math::Im2ColFunctor<math::ColFormat::kOCF, CPU, float> f;
     f(src, dilations, strides, paddings, &dst);
......
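The loop above reshapes each sample's output slice to {output_height, output_width, img_channels, kernels[0], kernels[1]}, i.e. one kernels[0] x kernels[1] patch per channel for every window position; the product of those five extents should equal out->numel() / batch_size after the earlier Resize. A small standalone arithmetic check with hypothetical sizes (not paddle_mobile code):

#include <iostream>

int main() {
  // Hypothetical sizes, chosen only for illustration.
  const int img_channels = 3, img_height = 5, img_width = 5;
  const int kernel_h = 2, kernel_w = 2, stride = 1, pad = 0;

  // Same output-size arithmetic as in the kernel above.
  const int output_height =
      1 + (pad + pad + img_height - kernel_h + stride - 1) / stride;
  const int output_width =
      1 + (pad + pad + img_width - kernel_w + stride - 1) / stride;

  // One full kernel_h x kernel_w patch per channel per window position.
  const long long per_sample = 1LL * output_height * output_width *
                               img_channels * kernel_h * kernel_w;
  std::cout << output_height << " x " << output_width << " windows, "
            << per_sample << " elements per sample" << std::endl;
  // With these numbers: 4 x 4 windows, 4 * 4 * 3 * 2 * 2 = 192 elements.
  return 0;
}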
......@@ -26,7 +26,8 @@ namespace operators {
 using namespace framework;
 template <typename DeviceType, typename T>
-class Im2SequenceKernel : public framework::OpKernelBase<DeviceType, Im2SequenceParam> {
+class Im2SequenceKernel
+    : public framework::OpKernelBase<DeviceType, Im2SequenceParam> {
  public:
   void Compute(const Im2SequenceParam &param) const;
 };
......
......@@ -850,8 +850,9 @@ class FusionConvAddReluParam : public FusionConvAddParam {
 class Im2SequenceParam : public OpParam {
  public:
-  Im2SequenceParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
-                   const AttributeMap &attrs, const Scope &scope) {
+  Im2SequenceParam(const VariableNameMap &inputs,
+                   const VariableNameMap &outputs, const AttributeMap &attrs,
+                   const Scope &scope) {
     input_x_ = InputXFrom<LoDTensor>(inputs, scope);
     out_ = OutFrom<LoDTensor>(outputs, scope);
     kernels_ = GetAttr<vector<int>>("kernels", attrs);
......