Commit 2219a89a authored by liuruilong

commit conv transpose

Parent 8622d667
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#include "operators/conv_transpose_op.h"
namespace paddle_mobile {
namespace operators {}
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#pragma once
#include <string>
#include <vector>
#include "framework/operator.h"
#include "operators/kernel/conv_transpose_kernel.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class ConvOpTranspose : public framework::OperatorWithKernel<
                            DeviceType, ConvTransposeParam,
                            operators::ConvTransposeKernel<DeviceType, T>> {
 public:
  ConvOpTranspose(const std::string &type, const VariableNameMap &inputs,
                  const VariableNameMap &outputs,
                  const framework::AttributeMap &attrs,
                  std::shared_ptr<framework::Scope> scope)
      : framework::OperatorWithKernel<
            DeviceType, ConvTransposeParam,
            operators::ConvTransposeKernel<DeviceType, T>>(
            type, inputs, outputs, attrs, scope) {}
  void InferShape() const {
    auto input = this->param_.Input();
    auto in_dims = input->dims();
    auto filter = this->param_.Filter();
    auto filter_dims = filter->dims();

    std::vector<int> strides = this->param_.Strides();
    std::vector<int> paddings = this->param_.Paddings();
    std::vector<int> dilations = this->param_.Dilations();
    int groups = this->param_.Groups();

    PADDLE_MOBILE_ENFORCE(
        in_dims.size() == 4 || in_dims.size() == 5,
        "ConvTransposeOp input should be a 4-D or 5-D tensor.");
    PADDLE_MOBILE_ENFORCE(
        in_dims.size() == filter_dims.size(),
        "ConvTransposeOp input dimension and filter dimension "
        "should be the same.");
    PADDLE_MOBILE_ENFORCE(
        in_dims.size() - strides.size() == 2U,
        "ConvTransposeOp input dimension and strides dimension should "
        "be consistent.");
    PADDLE_MOBILE_ENFORCE(paddings.size() == strides.size(),
                          "ConvTransposeOp paddings dimension and strides "
                          "dimension should be the same.");
    PADDLE_MOBILE_ENFORCE(paddings.size() == dilations.size(),
                          "ConvTransposeOp paddings dimension and dilations "
                          "dimension should be the same.");
    PADDLE_MOBILE_ENFORCE(
        in_dims[1] == filter_dims[0],
        "In ConvTransposeOp, the number of input channels should "
        "be equal to the number of filter channels.");

    std::vector<int64_t> output_shape({in_dims[0], filter_dims[1] * groups});
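    // For each spatial dim: out = (in - 1) * stride - 2 * pad + dilation * (k - 1) + 1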
    for (size_t i = 0; i < strides.size(); ++i) {
      auto filter_extent = dilations[i] * (filter_dims[i + 2] - 1) + 1;
      output_shape.push_back((in_dims[i + 2] - 1) * strides[i] -
                             2 * paddings[i] + filter_extent);
    }
    this->param_.Output()->Resize(framework::make_ddim(output_shape));
  }
private:
};
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#include "operators/kernel/conv_transpose_kernel.h"
#include "operators/kernel/central-arm-func/conv_transpose_arm_func.h"
namespace paddle_mobile {
namespace operators {
template <>
bool ConvTransposeKernel<CPU, float>::Init(ConvTransposeParam *param) {
  return true;
}

template <>
void ConvTransposeKernel<CPU, float>::Compute(
    const ConvTransposeParam &param) const {
  ConvTransposeCompute<float>(param);
}
template class ConvTransposeKernel<CPU, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#include <vector>
#include "framework/ddim.h"
#include "operators/math/im2col.h"
#include "operators/math/math_function.h"
#include "operators/math/vol2col.h"
#include "operators/op_param.h"
#pragma once
namespace paddle_mobile {
namespace operators {
template <typename P>
void ConvTransposeCompute(const ConvTransposeParam &param) {
  const Tensor *input = param.Input();
  Tensor filter = *param.Filter();
  Tensor *output = param.Output();

  auto strides = param.Strides();
  auto paddings = param.Paddings();
  auto dilations = param.Dilations();
  auto groups = param.Groups();

  const int batch_size = input->dims()[0];

  std::vector<int64_t> input_shape_vec = framework::vectorize(input->dims());
  std::vector<int64_t> filter_shape_vec = framework::vectorize(filter.dims());

  size_t data_dim = filter_shape_vec.size() - 2;

  // col buffer shape: 5 entries for 2-D conv, 7 entries for 3-D conv
  std::vector<int64_t> col_shape_vec(1 + 2 * data_dim);
  // output channels per group
  col_shape_vec[0] = output->dims()[1] / groups;
  for (size_t i = 0; i < data_dim; ++i) {
    // filter spatial size (k_h, k_w)
    col_shape_vec[i + 1] = filter_shape_vec[i + 2];
    // input spatial size (in_h, in_w)
    col_shape_vec[i + 1 + data_dim] = input_shape_vec[i + 2];
  }
  framework::DDim col_shape(framework::make_ddim(col_shape_vec));
  framework::DDim col_matrix_shape =
      framework::flatten_to_2d(col_shape, data_dim + 1);

  Tensor col;
  col.mutable_data<P>(col_shape);
  Tensor col_matrix;
  col_matrix.ShareDataWith(col);
  col_matrix.Resize(col_matrix_shape);
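  // col_matrix is a 2-D view over col's memory, {c/g * k_h * k_w, in_h * in_w}
  // (analogously with k_d / in_d in the 3-D case); it receives the GEMM result below.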
  framework::DDim output_shape =
      framework::slice_ddim(output->dims(), 1, output->dims().size());

  framework::DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]};

  // filter size: (m, c/g * k_h * k_w) or (m, c/g * k_d * k_h * k_w)
  framework::DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]};
  filter.Resize(filter_matrix_shape);

  output->mutable_data<P>();
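  // Channels consumed from the input / produced into the output by each group.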
  int in_step = static_cast<int>(input->dims()[1]) / groups;
  int out_step = static_cast<int>(output->dims()[1]) / groups;

  math::Col2ImFunctor<math::ColFormat::kCFO, CPU, P> col2im;
  math::Col2VolFunctor<CPU, P> col2vol;
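  // For every sample and every group: col_matrix = filter_slice^T * in_slice (GEMM),
  // then col2im (2-D) or col2vol (3-D) accumulates the unfolded columns back into the
  // output feature map -- the inverse of the im2col path used by forward convolution.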
  for (int i = 0; i < batch_size; ++i) {
    Tensor input_batch = input->Slice(i, i + 1).Resize(input_matrix_shape);
    Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape);

    for (int g = 0; g < groups; ++g) {
      Tensor in_slice = input_batch.Slice(g * in_step, (g + 1) * in_step);
      Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step);
      Tensor out_slice = output_batch.Slice(g * out_step, (g + 1) * out_step);

      math::matmul(filter_slice, true, in_slice, false, static_cast<P>(1.0),
                   &col_matrix, static_cast<P>(0.0));

      if (data_dim == 2U) {
        col2im(col, dilations, strides,
               std::vector<int>{paddings[0], paddings[1], paddings[0],
                                paddings[1]},
               &out_slice);
      } else if (data_dim == 3U) {
        col2vol(col, dilations, strides, paddings, &out_slice);
      }
    }
  }
}
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_TRANSPOSE
#pragma once
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
using framework::OpKernelBase;
template <typename DeviceType, typename T>
class ConvTransposeKernel
    : public OpKernelBase<DeviceType, ConvTransposeParam> {
 public:
  void Compute(const ConvTransposeParam &param) const;
  bool Init(ConvTransposeParam *param);
};
} // namespace operators
} // namespace paddle_mobile
#endif  // CONV_TRANSPOSE
@@ -1419,5 +1419,45 @@ class DropoutParam : public OpParam {
};
#endif
#ifdef CONV_TRANSPOSE
class ConvTransposeParam : public OpParam {
 public:
  ConvTransposeParam(const VariableNameMap &inputs,
                     const VariableNameMap &outputs, const AttributeMap &attrs,
                     const Scope &scope) {
    filter_ = FilterFrom<LoDTensor>(inputs, scope);
    input_ = InputFrom<LoDTensor>(inputs, scope);
    output_ = OutputFrom<LoDTensor>(outputs, scope);

    strides_ = GetAttr<vector<int>>("strides", attrs);
    paddings_ = GetAttr<vector<int>>("paddings", attrs);
    dilations_ = GetAttr<vector<int>>("dilations", attrs);
    groups = GetAttr<int>("groups", attrs);
  }

  const Tensor *Input() const { return input_; }
  const Tensor *Filter() const { return filter_; }
  Tensor *Output() const { return output_; }

  const vector<int> &Strides() const { return strides_; }
  const vector<int> &Paddings() const { return paddings_; }
  const vector<int> &Dilations() const { return dilations_; }
  const int &Groups() const { return groups; }

 private:
  Tensor *input_;
  Tensor *output_;
  Tensor *filter_;
  vector<int> strides_;
  vector<int> paddings_;
  vector<int> dilations_;
  int groups;
};
#endif
} // namespace operators
} // namespace paddle_mobile
@@ -90,6 +90,7 @@ endif()
if(NOT FOUND_MATCH)
  message("--default--")
  set(BATCHNORM_OP ON)
  set(CONV_TRANSPOSE_OP ON)
  set(BOXCODER_OP ON)
  set(CONCAT_OP ON)
  set(CONV_OP ON)
@@ -243,3 +244,6 @@ if (REGION_OP)
  add_definitions(-DREGION_OP)
endif()

if (CONV_TRANSPOSE_OP)
  add_definitions(-DCONV_TRANSPOSE)
endif()