Unverified · Commit e2636a12 · Authored by Ray Liu · Committed by GitHub

Merge pull request #1344 from hjchen2/dev-latest

Refactor the pooling and dequant fusion implementation, and fix some code style issues
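The dequant + batch-norm fusions reworked below all precompute a per-channel scale and bias from the batch-norm statistics (see PublicFusionDequantBNInitParam in the new dequant_bn_kernel.cpp), so the kernel inner loop only applies scale * x + bias. A minimal standalone sketch of that folding, using made-up channel values instead of the framework's tensors:

#include <cmath>
#include <vector>

// Sketch of the per-channel batch-norm folding used by the fused dequant+BN
// kernels: scale' = gamma / sqrt(var + eps), bias' = scale' * (bias - mean) + beta.
// The channel values below are invented for illustration only.
int main() {
  const float epsilon = 1e-5f;
  std::vector<float> gamma = {1.0f, 0.5f};   // BNScale
  std::vector<float> beta  = {0.1f, -0.2f};  // BNBias
  std::vector<float> mean  = {0.0f, 1.0f};   // BNMean
  std::vector<float> var   = {1.0f, 4.0f};   // BNVariance
  std::vector<float> bias  = {0.3f, 0.3f};   // elementwise-add bias (zero if absent)

  for (size_t c = 0; c < gamma.size(); ++c) {
    float inv_std = 1.f / std::sqrt(var[c] + epsilon);
    float scale = inv_std * gamma[c];
    float shift = scale * (bias[c] - mean[c]) + beta[c];
    // At run time each dequantized value x is then transformed as scale * x + shift.
    (void)scale;
    (void)shift;
  }
  return 0;
}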
@@ -16,9 +16,9 @@ limitations under the License. */
 #ifdef ENABLE_EXCEPTION
 #include <stdio.h>
+#include <stdlib.h>
 #include <exception>
 #include <string>
 #endif
 namespace paddle_mobile {
......
@@ -24,7 +24,6 @@ const char *G_OP_TYPE_CONCAT = "concat";
 const char *G_OP_TYPE_ELEMENTWISE_ADD = "elementwise_add";
 const char *G_OP_TYPE_FILL_CONSTANT = "fill_constant";
 const char *G_OP_TYPE_FUSION_CONV_ADD_RELU = "fusion_conv_add_relu";
-const char *G_OP_TYPE_FUSION_CONV_ADD_RELU_INT8 = "fusion_conv_add_relu_int8";
 const char *G_OP_TYPE_FUSION_CONV_ADD_PRELU = "fusion_conv_add_prelu";
 const char *G_OP_TYPE_FUSION_CONV_ADD_ADD_PRELU = "fusion_conv_add_add_prelu";
 const char *G_OP_TYPE_FUSION_CONV_ADD_BN_RELU = "fusion_conv_add_bn_relu";
@@ -32,7 +31,6 @@ const char *G_OP_TYPE_FUSION_CONV_BN_ADD_RELU = "fusion_conv_bn_add_relu";
 const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU = "fusion_dwconv_bn_relu";
 const char *G_OP_TYPE_FUSION_CONV_BN_RELU = "fusion_conv_bn_relu";
 const char *G_OP_TYPE_FC = "fusion_fc";
-const char *G_OP_TYPE_FC_INT8 = "fusion_fc_int8";
 const char *G_OP_TYPE_FUSION_CONV_ADD = "fusion_conv_add";
 const char *G_OP_TYPE_LRN = "lrn";
 const char *G_OP_TYPE_MUL = "mul";
@@ -41,6 +39,7 @@ const char *G_OP_TYPE_POLYGON_BOX_TRANSFORM = "polygon_box_transform";
 const char *G_OP_TYPE_POOL2D = "pool2d";
 const char *G_OP_TYPE_PRIOR_BOX = "prior_box";
 const char *G_OP_TYPE_RELU = "relu";
+const char *G_OP_TYPE_RELU6 = "relu6";
 const char *G_OP_TYPE_RESHAPE = "reshape";
 const char *G_OP_TYPE_RESHAPE2 = "reshape2";
 const char *G_OP_TYPE_SIGMOID = "sigmoid";
@@ -73,9 +72,14 @@ const char *G_OP_TYPE_SUM = "sum";
 const char *G_OP_TYPE_QUANTIZE = "quantize";
 const char *G_OP_TYPE_DEQUANTIZE = "dequantize";
+const char *G_OP_TYPE_FUSION_DEQUANT_BN = "fusion_dequant_bn";
 const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN = "fusion_dequant_add_bn";
 const char *G_OP_TYPE_FUSION_DEQUANT_BN_RELU = "fusion_dequant_bn_relu";
 const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU = "fusion_dequant_add_bn_relu";
+const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN_QUANT =
+    "fusion_dequant_add_bn_quant";
+const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU_QUANT =
+    "fusion_dequant_add_bn_relu_quant";
 const char *G_OP_TYPE_TANH = "tanh";
 const char *G_OP_TYPE_FUSION_DECONV_RELU = "fusion_deconv_relu";
@@ -91,6 +95,7 @@ std::unordered_map<
     {G_OP_TYPE_PRELU, {{"X", "Alpha"}, {"Out"}}},
     {G_OP_TYPE_FUSION_CONV_ADD, {{"Input"}, {"Out"}}},
     {G_OP_TYPE_RELU, {{"X"}, {"Out"}}},
+    {G_OP_TYPE_RELU6, {{"X"}, {"Out"}}},
     {G_OP_TYPE_SOFTMAX, {{"X"}, {"Out"}}},
     {G_OP_TYPE_SIGMOID, {{"X"}, {"Out"}}},
     {G_OP_TYPE_MUL, {{"X"}, {"Out"}}},
@@ -112,13 +117,11 @@ std::unordered_map<
     {G_OP_TYPE_MULTICLASS_NMS, {{"BBoxes", "Scores"}, {"Out"}}},
     {G_OP_TYPE_POLYGON_BOX_TRANSFORM, {{"Input"}, {"Output"}}},
     {G_OP_TYPE_FC, {{"X", "Y", "Z"}, {"Out"}}},
-    {G_OP_TYPE_FC_INT8, {{"X", "Y", "Z", "Scale"}, {"Out"}}},
     {G_OP_TYPE_RESHAPE, {{"X"}, {"Out"}}},
     {G_OP_TYPE_RESHAPE2, {{"X"}, {"Out", "XShape"}}},
     {G_OP_TYPE_DEPTHWISE_CONV, {{"Input"}, {"Output"}}},
     {G_OP_TYPE_FILL_CONSTANT, {{}, {"Out"}}},
     {G_OP_TYPE_FUSION_CONV_ADD_RELU, {{"Input"}, {"Out"}}},
-    {G_OP_TYPE_FUSION_CONV_ADD_RELU_INT8, {{"Input", "Scale"}, {"Out"}}},
     {G_OP_TYPE_FUSION_CONV_ADD_PRELU, {{"Input"}, {"Out"}}},
     {G_OP_TYPE_FUSION_CONV_ADD_ADD_PRELU, {{"Input"}, {"Out"}}},
     {G_OP_TYPE_IM2SEQUENCE, {{"X"}, {"Out"}}},
@@ -142,9 +145,14 @@ std::unordered_map<
     {G_OP_TYPE_ELEMENTWISE_MUL, {{"X", "Y"}, {"Out"}}},
     {G_OP_TYPE_QUANTIZE, {{"X"}, {"Out", "OutScale"}}},
     {G_OP_TYPE_DEQUANTIZE, {{"X", "Scale"}, {"Out"}}},
-    {G_OP_TYPE_FUSION_DEQUANT_ADD_BN, {{"X", "Scale"}, {"Y"}}},
+    {G_OP_TYPE_FUSION_DEQUANT_BN, {{"X", "Scale"}, {"Out"}}},
+    {G_OP_TYPE_FUSION_DEQUANT_ADD_BN, {{"X", "Scale"}, {"Out"}}},
     {G_OP_TYPE_FUSION_DEQUANT_BN_RELU, {{"X", "Scale"}, {"Out"}}},
     {G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU, {{"X", "Scale"}, {"Out"}}},
+    {G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU_QUANT,
+     {{"X", "Scale"}, {"Out", "OutScale"}}},
+    {G_OP_TYPE_FUSION_DEQUANT_ADD_BN_QUANT,
+     {{"X", "Scale"}, {"Out", "OutScale"}}},
     {G_OP_TYPE_TANH, {{"X"}, {"Out"}}},
     {G_OP_TYPE_FUSION_DECONV_RELU, {{"Input"}, {"Out"}}},
     {G_OP_TYPE_FUSION_DECONV_ADD, {{"Input"}, {"Out"}}},
......
@@ -87,10 +87,24 @@ enum PMStatus {
 };

 enum RoundType {
-  ROUND_UNK = 0,
-  ROUND_NEAREST_AWAY_ZERO = 1,
-  ROUND_NEAREST_TOWARDS_ZERO = 2,
-  ROUND_NEAREST_TO_EVEN = 3
+  ROUND_NEAREST_AWAY_ZERO = 0,
+  ROUND_NEAREST_TOWARDS_ZERO = 1,
+  ROUND_NEAREST_TO_EVEN = 2,
+};
+
+enum ActivationType {
+  IDENTITY = 0,
+  RELU = 1,
+  RELU6 = 2,
+  PRELU = 3,
+  LEAKY_RELU = 4,
+  TANH = 5,
+  SIGMOID = 6,
+};
+
+enum PoolingType {
+  MAX = 0,
+  AVG = 1,
 };

 extern const char *G_OP_TYPE_CONV;
@@ -99,11 +113,9 @@ extern const char *G_OP_TYPE_BOX_CODER;
 extern const char *G_OP_TYPE_CONCAT;
 extern const char *G_OP_TYPE_ELEMENTWISE_ADD;
 extern const char *G_OP_TYPE_FUSION_CONV_ADD_RELU;
-extern const char *G_OP_TYPE_FUSION_CONV_ADD_RELU_INT8;
 extern const char *G_OP_TYPE_FUSION_CONV_ADD_PRELU;
 extern const char *G_OP_TYPE_FUSION_CONV_ADD_ADD_PRELU;
 extern const char *G_OP_TYPE_FC;
-extern const char *G_OP_TYPE_FC_INT8;
 extern const char *G_OP_TYPE_FUSION_CONV_ADD;
 extern const char *G_OP_TYPE_FUSION_CONV_ADD_BN_RELU;
 extern const char *G_OP_TYPE_FUSION_CONV_BN_ADD_RELU;
@@ -116,6 +128,7 @@ extern const char *G_OP_TYPE_MULTICLASS_NMS;
 extern const char *G_OP_TYPE_POOL2D;
 extern const char *G_OP_TYPE_PRIOR_BOX;
 extern const char *G_OP_TYPE_RELU;
+extern const char *G_OP_TYPE_RELU6;
 extern const char *G_OP_TYPE_RESHAPE;
 extern const char *G_OP_TYPE_SIGMOID;
 extern const char *G_OP_TYPE_SOFTMAX;
@@ -140,9 +153,12 @@ extern const char *G_OP_TYPE_ELEMENTWISE_MUL;
 extern const char *G_OP_TYPE_QUANTIZE;
 extern const char *G_OP_TYPE_DEQUANTIZE;
+extern const char *G_OP_TYPE_FUSION_DEQUANT_BN;
 extern const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN;
 extern const char *G_OP_TYPE_FUSION_DEQUANT_BN_RELU;
 extern const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU;
+extern const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN_QUANT;
+extern const char *G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU_QUANT;
 extern const char *G_OP_TYPE_TANH;
 extern const char *G_OP_TYPE_FUSION_DECONV_RELU;
......
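The new ActivationType enum is consumed as a template parameter further down (DequantBNCompute<IDENTITY> vs DequantBNCompute<RELU> in dequant_bn_kernel.cpp), so one fused kernel body serves several activation variants with the activation resolved at compile time. An illustrative sketch of that dispatch pattern, with a simplified Active helper standing in for the framework's math::Active:

#include <algorithm>
#include <cstdio>

enum ActivationType { IDENTITY = 0, RELU = 1 };

// Stand-in for math::Active<Act>; only the identity/relu cases used by the
// fused dequant+BN kernels are mirrored here.
template <ActivationType Act>
inline float Active(float x) { return x; }

template <>
inline float Active<RELU>(float x) { return std::max(x, 0.f); }

template <ActivationType Act>
void ApplyScaleBias(const float *x, float *y, int n, float scale, float bias) {
  for (int i = 0; i < n; ++i) {
    y[i] = Active<Act>(scale * x[i] + bias);  // activation chosen at compile time
  }
}

int main() {
  float in[4] = {-2.f, -1.f, 1.f, 2.f};
  float out[4];
  ApplyScaleBias<RELU>(in, out, 4, 1.f, 0.f);
  std::printf("%f %f %f %f\n", out[0], out[1], out[2], out[3]);
  return 0;
}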
@@ -302,8 +302,16 @@ std::shared_ptr<framework::Tensor> Executor<Dtype, P>::Predict(
   for (int i = 0; i < profile.size(); i++) {
     const auto &pInfo = profile[i];
     uint64_t timeCost = pInfo.runEnd - pInfo.runBegin;
+    if (ops[i]->Type() == "conv2d") {
+      auto inputs = ops[i]->Inputs();
+      auto *filter = framework::GetVarValue<framework::LoDTensor>(
+          "Filter", inputs, *(program_.scope));
+      int kernel_size = filter->dims()[2];
+      _tp[ops[i]->Type() + "_" + std::to_string(kernel_size)] += timeCost;
+    } else {
       _tp[ops[i]->Type()] += timeCost;
+    }
   }
   printf("====================[ profile ]======================\n");
   using prof_t = std::pair<std::string, uint64_t>;
   std::vector<prof_t> _tv(_tp.begin(), _tp.end());
@@ -372,6 +380,14 @@ std::shared_ptr<framework::LoDTensor> Executor<Dtype, P>::PredictLod(
   for (int i = 0; i < profile.size(); i++) {
     const auto &pInfo = profile[i];
     uint64_t timeCost = pInfo.runEnd - pInfo.runBegin;
+    if (ops[i]->Type() == "conv2d") {
+      auto inputs = ops[i]->Inputs();
+      auto input_keys = ops[i]->GetInputKeys();
+      auto *filter = framework::GetVarValue<framework::LoDTensor>(
+          input_keys[1], inputs, *(program_.scope));
+      int kernel_size = filter->dims()[2];
+      printf("kernel size: %d\n", kernel_size);
+    }
     _tp[ops[i]->Type()] += timeCost;
   }
   printf("====================[ profile ]======================\n");
......
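The profiling changes above split conv2d timings by filter spatial size (filter->dims()[2]), so 1x1 and 3x3 convolutions are reported on separate rows. A reduced sketch of that aggregation, using a made-up profile list instead of the executor's op objects:

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

// Hypothetical per-op profile record mirroring the executor's _tp map:
// conv2d entries are keyed as "conv2d_<kernel_size>", everything else by type.
struct OpProfile {
  std::string type;
  int kernel_size;  // only meaningful for conv2d
  uint64_t time_ns;
};

int main() {
  OpProfile profile[] = {{"conv2d", 3, 120}, {"conv2d", 1, 40}, {"relu", 0, 5}};
  std::map<std::string, uint64_t> tp;
  for (const auto &p : profile) {
    std::string key = (p.type == "conv2d")
                          ? p.type + "_" + std::to_string(p.kernel_size)
                          : p.type;
    tp[key] += p.time_ns;
  }
  for (const auto &kv : tp) {
    std::printf("%s: %llu\n", kv.first.c_str(),
                static_cast<unsigned long long>(kv.second));
  }
  return 0;
}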
@@ -191,6 +191,7 @@ LOAD_OP2(mul, CPU, MALI_GPU);
 #endif
 #ifdef RELU_OP
 LOAD_OP2(relu, CPU, MALI_GPU);
+LOAD_OP1(relu6, CPU);
 #endif
 #ifdef IM2SEQUENCE_OP
 LOAD_OP1(im2sequence, CPU);
@@ -233,6 +234,10 @@ LOAD_OP1(quantize, CPU);
 #ifdef DEQUANT_OP
 LOAD_OP1(dequantize, CPU);
 #endif
+#ifdef FUSION_DEQUANT_BN_OP
+LOAD_OP1(fusion_dequant_bn, CPU);
+LOAD_FUSION_MATCHER(fusion_dequant_bn);
+#endif
 #ifdef FUSION_DEQUANT_ADD_BN_OP
 LOAD_OP1(fusion_dequant_add_bn, CPU);
 LOAD_FUSION_MATCHER(fusion_dequant_add_bn);
@@ -245,3 +250,11 @@ LOAD_FUSION_MATCHER(fusion_dequant_bn_relu);
 LOAD_OP1(fusion_dequant_add_bn_relu, CPU);
 LOAD_FUSION_MATCHER(fusion_dequant_add_bn_relu);
 #endif
+#ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
+LOAD_OP1(fusion_dequant_add_bn_quant, CPU);
+LOAD_FUSION_MATCHER(fusion_dequant_add_bn_quant);
+#endif
+#ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
+LOAD_OP1(fusion_dequant_add_bn_relu_quant, CPU);
+LOAD_FUSION_MATCHER(fusion_dequant_add_bn_relu_quant);
+#endif
@@ -98,24 +98,6 @@ class OpRegistry {
   }
 };

-#define REGISTER_OPERATOR_INT8(op_type, op_class, device_name, device_type) \
-  template class op_class<device_type, int8_t>; \
-  template <typename Dtype, typename T> \
-  class _OpClass_##op_type##_##device_name : public op_class<Dtype, T> { \
-   public: \
-    DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_##device_name, op_class); \
-  }; \
-  static paddle_mobile::framework::OperatorRegistrar< \
-      device_type, _OpClass_##op_type##_##device_name<device_type, int8_t>> \
-      __op_registrar_##op_type##_##device_name(#op_type); \
-  int TouchOpRegistrar_##op_type##_##device_name() { \
-    __op_registrar_##op_type##_##device_name.Touch(); \
-    return 0; \
-  }
-
-#define REGISTER_OPERATOR_CPU_INT8(op_type, op_class) \
-  REGISTER_OPERATOR_INT8(op_type, op_class, cpu, paddle_mobile::CPU);
-
 #define REGISTER_OPERATOR(op_type, op_class, device_name, device_type) \
   template class op_class<device_type, float>; \
   template <typename Dtype, typename T> \
......
@@ -220,7 +220,16 @@ void Node::Folder(
       }
     } else {
       for (auto &op_output : this->op_desc_->outputs_) {
-        op_desc->outputs_.emplace(op_output.first, op_output.second);
+        auto output_key = op_output.first;
+        if (change->find(this->type_) != change->end()) {
+          const auto change_pairs = (*change)[this->type_];
+          for (const auto &target : change_pairs) {
+            if (target.first == output_key) {
+              output_key = target.second;
+            }
+          }
+        }
+        op_desc->outputs_.emplace(output_key, op_output.second);
       }
       for (auto &output : this->outputs_) {
......
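This Folder change applies the fusion matcher's change map to output keys as well as input keys, which is what the new {"Y", "Out"} entries in the dequant matchers rely on: batch_norm's Y output is re-exposed as the fused op's Out. A reduced sketch of the key remapping, using plain standard containers instead of the framework's Node/OpDesc types:

#include <cstdio>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
  // change map for a hypothetical "batch_norm" node inside the fused subgraph.
  std::map<std::string, std::vector<std::pair<std::string, std::string>>> change = {
      {"batch_norm", {{"Scale", "BNScale"}, {"Y", "Out"}}}};

  // Original output keys of the node being folded.
  std::vector<std::string> outputs = {"Y", "MeanOut"};

  for (auto &key : outputs) {
    auto it = change.find("batch_norm");
    if (it != change.end()) {
      for (const auto &target : it->second) {
        if (target.first == key) key = target.second;  // "Y" -> "Out"
      }
    }
    std::printf("%s\n", key.c_str());  // prints "Out" then "MeanOut"
  }
  return 0;
}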
@@ -143,12 +143,10 @@ double PaddleMobile<CPU, Precision::FP32>::GetPredictTime() {
   int t1 = 1;
   int t2 = 1;
   for (int i = 0; i < m * k; ++i) {
-    unsigned int seed = 100;
-    a[i] = t1 + rand_r(&seed) % t2;
+    a[i] = t1 + rand() % t2;  // NOLINT
   }
   for (int i = 0; i < k * n; ++i) {
-    unsigned int seed = 200;
-    b[i] = t1 + rand_r(&seed) % t2;
+    b[i] = t1 + rand() % t2;  // NOLINT
   }
   paddle_mobile::operators::math::Gemm gemm;
   auto time1 = paddle_mobile::time();
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_CONVADDRELU_INT8_OP
#include "operators/fusion_conv_add_relu_int8_op.h"
#include <vector>
#include "operators/math/conv_func.h"
namespace paddle_mobile {
namespace operators {
template <typename Dtype, typename T>
void FusionConvAddReluInt8Op<Dtype, T>::InferShape() const {
auto in_dims = this->param_.Input()->dims();
auto filter_dims = this->param_.Filter()->dims();
const std::vector<int> &strides = this->param_.Strides();
std::vector<int> paddings = this->param_.Paddings();
int groups = this->param_.Groups();
std::vector<int> dilations = this->param_.Dilations();
PADDLE_MOBILE_ENFORCE((in_dims.size() == filter_dims.size() &&
dilations.size() == paddings.size() &&
paddings.size() == strides.size()),
"ConvParam is not suitable");
std::vector<int64_t> output_shape({in_dims[0], filter_dims[0]});
for (size_t i = 0; i < strides.size(); ++i) {
output_shape.push_back(
math::ConvOutputSize(in_dims[i + 2], filter_dims[i + 2], dilations[i],
paddings[i], strides[i]));
}
framework::DDim ddim = framework::make_ddim(output_shape);
this->param_.Output()->Resize(ddim);
}
} // namespace operators
} // namespace paddle_mobile
namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU_INT8(fusion_conv_add_relu_int8,
ops::FusionConvAddReluInt8Op);
#endif
#endif // FUSION_CONVADDRELU_INT8_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_CONVADDRELU_INT8_OP
#pragma once
#include <string>
#include "framework/operator.h"
#include "operators/kernel/conv_add_relu_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class FusionConvAddReluInt8Op
: public framework::OperatorWithKernel<DeviceType,
FusionConvAddReluParam<DeviceType>,
ConvAddReluKernel<DeviceType, T>> {
public:
FusionConvAddReluInt8Op(const std::string &type,
const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType,
FusionConvAddReluParam<DeviceType>,
ConvAddReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
void InferShape() const override;
};
} // namespace operators
} // namespace paddle_mobile
#endif // FUSION_CONVADDRELU_INT8_OP
@@ -20,7 +20,7 @@ limitations under the License. */
 #include <vector>
 #include "framework/operator.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
-#include "operators/kernel/dequant_add_bn_kernel.h"
+#include "operators/kernel/dequant_bn_kernel.h"
 #include "operators/op_param.h"
 namespace paddle_mobile {
@@ -43,7 +43,8 @@ class FusionDequantAddBNMatcher : public framework::FusionOpMatcher {
                   {{"Scale", "BNScale"},
                    {"Mean", "BNMean"},
                    {"Bias", "BNBias"},
-                   {"Variance", "BNVariance"}}}},
+                   {"Variance", "BNVariance"},
+                   {"Y", "Out"}}}},
                 removed_nodes);
   }
......
@@ -20,7 +20,7 @@ limitations under the License. */
 #include <vector>
 #include "framework/operator.h"
 #include "framework/program/program-optimize/fusion_op_register.h"
-#include "operators/kernel/dequant_bn_relu_kernel.h"
+#include "operators/kernel/dequant_bn_kernel.h"
 #include "operators/op_param.h"
 namespace paddle_mobile {
@@ -44,7 +44,8 @@ class FusionDequantAddBNReluMatcher : public framework::FusionOpMatcher {
                   {{"Scale", "BNScale"},
                    {"Mean", "BNMean"},
                    {"Bias", "BNBias"},
-                   {"Variance", "BNVariance"}}}},
+                   {"Variance", "BNVariance"},
+                   {"Y", "Out"}}}},
                 removed_nodes);
   }
@@ -54,7 +55,7 @@ class FusionDequantAddBNReluMatcher : public framework::FusionOpMatcher {
 template <typename DeviceType, typename T>
 class FusionDequantAddBNReluOp
     : public framework::OperatorWithKernel<
-          DeviceType, FusionDequantAddBNReluParam<DeviceType>,
+          DeviceType, FusionDequantAddBNParam<DeviceType>,
           operators::FusionDequantAddBNReluKernel<DeviceType, T>> {
  public:
  FusionDequantAddBNReluOp(const std::string &type,
@@ -63,7 +64,7 @@ class FusionDequantAddBNReluOp
                            const framework::AttributeMap &attrs,
                            std::shared_ptr<framework::Scope> scope)
       : framework::OperatorWithKernel<
-            DeviceType, FusionDequantAddBNReluParam<DeviceType>,
+            DeviceType, FusionDequantAddBNParam<DeviceType>,
             operators::FusionDequantAddBNReluKernel<DeviceType, T>>(
             type, inputs, outputs, attrs, scope) {}
   // inference output shape
......
@@ -12,50 +12,51 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifdef FUSION_FC_INT8_OP
-#include "operators/fusion_fc_int8_op.h"
+#include "operators/fusion_dequant_add_bn_relu_quant_op.h"
+#ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
 namespace paddle_mobile {
 namespace operators {
 template <typename Dtype, typename T>
-void FusionFcInt8Op<Dtype, T>::InferShape() const {
-  auto x_dims = this->param_.InputX()->dims();
-  auto y_dims = this->param_.InputY()->dims();
-  int x_num_col_dims = this->param_.XNumColDims();
-  int y_num_col_dims = this->param_.YNumColDims();
-  assert(x_dims.size() > x_num_col_dims);
-  assert(y_dims.size() > y_num_col_dims);
-  /// (1,2,3,4) , x_num_col_dims = 2 -> (2,12)
-  auto x_mat_dims = framework::flatten_to_2d(x_dims, x_num_col_dims);
-  auto y_mat_dims = framework::flatten_to_2d(y_dims, y_num_col_dims);
-  assert(x_mat_dims[1] == y_mat_dims[0]);
-  std::vector<int64_t> output_dims;
-  output_dims.reserve(
-      static_cast<size_t>(x_num_col_dims + y_dims.size() - y_num_col_dims));
-  for (int i = 0; i < x_num_col_dims; ++i) {
-    output_dims.push_back(x_dims[i]);
-  }
-  for (int i = y_num_col_dims; i < y_dims.size(); ++i) {
-    output_dims.push_back(y_dims[i]);
-  }
-  framework::DDim ddim = framework::make_ddim(output_dims);
-  this->param_.Out()->Resize(ddim);
+void FusionDequantAddBNReluQuantOp<Dtype, T>::InferShape() const {
+  const auto& input_dims = this->param_.input_->dims();
+  this->param_.output_->Resize(input_dims);
+}
+}  // namespace operators
+}  // namespace paddle_mobile
+namespace ops = paddle_mobile::operators;
+REGISTER_FUSION_MATCHER(fusion_dequant_add_bn_relu_quant,
+                        ops::FusionDequantAddBNReluQuantMatcher);
+#ifdef PADDLE_MOBILE_CPU
+REGISTER_OPERATOR_CPU(fusion_dequant_add_bn_relu_quant,
+                      ops::FusionDequantAddBNReluQuantOp);
+#endif
+#endif  // FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
+#ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
+namespace paddle_mobile {
+namespace operators {
+template <typename Dtype, typename T>
+void FusionDequantAddBNQuantOp<Dtype, T>::InferShape() const {
+  const auto& input_dims = this->param_.input_->dims();
+  this->param_.output_->Resize(input_dims);
 }
 }  // namespace operators
 }  // namespace paddle_mobile
 namespace ops = paddle_mobile::operators;
+REGISTER_FUSION_MATCHER(fusion_dequant_add_bn_quant,
+                        ops::FusionDequantAddBNQuantMatcher);
 #ifdef PADDLE_MOBILE_CPU
-REGISTER_OPERATOR_CPU_INT8(fusion_fc_int8, ops::FusionFcInt8Op);
+REGISTER_OPERATOR_CPU(fusion_dequant_add_bn_quant,
+                      ops::FusionDequantAddBNQuantOp);
 #endif
-#endif  // FUSION_FC_INT8_OP
+#endif  // FUSION_DEQUANT_ADD_BN_QUANT_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"
#include "operators/kernel/dequant_bn_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
class FusionDequantAddBNReluQuantMatcher : public framework::FusionOpMatcher {
public:
FusionDequantAddBNReluQuantMatcher() {
node_ = framework::Node(G_OP_TYPE_DEQUANTIZE);
node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD) >
std::make_shared<framework::Node>(G_OP_TYPE_BATCHNORM) >
std::make_shared<framework::Node>(G_OP_TYPE_RELU) >
std::make_shared<framework::Node>(G_OP_TYPE_QUANTIZE);
}
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}},
{G_OP_TYPE_BATCHNORM,
{{"Scale", "BNScale"},
{"Mean", "BNMean"},
{"Bias", "BNBias"},
{"Variance", "BNVariance"},
{"Y", "Out"}}}},
removed_nodes);
}
std::string Type() { return G_OP_TYPE_FUSION_DEQUANT_ADD_BN_RELU_QUANT; }
};
template <typename DeviceType, typename T>
class FusionDequantAddBNReluQuantOp
: public framework::OperatorWithKernel<
DeviceType, FusionDequantAddBNReluQuantParam<DeviceType>,
operators::FusionDequantAddBNReluQuantKernel<DeviceType, T>> {
public:
FusionDequantAddBNReluQuantOp(const std::string &type,
const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<
DeviceType, FusionDequantAddBNReluQuantParam<DeviceType>,
operators::FusionDequantAddBNReluQuantKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
// inference output shape
void InferShape() const override;
};
#endif // FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
#ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
class FusionDequantAddBNQuantMatcher : public framework::FusionOpMatcher {
public:
FusionDequantAddBNQuantMatcher() {
node_ = framework::Node(G_OP_TYPE_DEQUANTIZE);
node_ > std::make_shared<framework::Node>(G_OP_TYPE_ELEMENTWISE_ADD) >
std::make_shared<framework::Node>(G_OP_TYPE_BATCHNORM) >
std::make_shared<framework::Node>(G_OP_TYPE_QUANTIZE);
}
void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_ELEMENTWISE_ADD, {{"Y", "Y"}}},
{G_OP_TYPE_BATCHNORM,
{{"Scale", "BNScale"},
{"Mean", "BNMean"},
{"Bias", "BNBias"},
{"Variance", "BNVariance"},
{"Y", "Out"}}}},
removed_nodes);
}
std::string Type() { return G_OP_TYPE_FUSION_DEQUANT_ADD_BN_QUANT; }
};
template <typename DeviceType, typename T>
class FusionDequantAddBNQuantOp
: public framework::OperatorWithKernel<
DeviceType, FusionDequantAddBNQuantParam<DeviceType>,
operators::FusionDequantAddBNQuantKernel<DeviceType, T>> {
public:
FusionDequantAddBNQuantOp(const std::string &type,
const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<
DeviceType, FusionDequantAddBNQuantParam<DeviceType>,
operators::FusionDequantAddBNQuantKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
// inference output shape
void InferShape() const override;
};
#endif // FUSION_DEQUANT_ADD_BN_QUANT_OP
} // namespace operators
} // namespace paddle_mobile
@@ -12,28 +12,43 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifdef FUSION_DEQUANT_BN_RELU_OP
-#include "operators/fusion_dequant_bn_relu_op.h"
+#include "operators/fusion_dequant_bn_op.h"
 namespace paddle_mobile {
 namespace operators {
+#ifdef FUSION_DEQUANT_BN_OP
+template <typename Dtype, typename T>
+void FusionDequantBNOp<Dtype, T>::InferShape() const {
+  const auto& input_dims = this->param_.input_->dims();
+  this->param_.output_->Resize(input_dims);
+}
+#endif  // FUSION_DEQUANT_BN_OP
+#ifdef FUSION_DEQUANT_BN_RELU_OP
 template <typename Dtype, typename T>
 void FusionDequantBNReluOp<Dtype, T>::InferShape() const {
   const auto& input_dims = this->param_.input_->dims();
   this->param_.output_->Resize(input_dims);
 }
+#endif  // FUSION_DEQUANT_BN_RELU_OP
 }  // namespace operators
 }  // namespace paddle_mobile
 namespace ops = paddle_mobile::operators;
+#ifdef FUSION_DEQUANT_BN_OP
+REGISTER_FUSION_MATCHER(fusion_dequant_bn, ops::FusionDequantBNMatcher);
+#ifdef PADDLE_MOBILE_CPU
+REGISTER_OPERATOR_CPU(fusion_dequant_bn, ops::FusionDequantBNOp);
+#endif  // PADDLE_MOBILE_CPU
+#endif  // FUSION_DEQUANT_BN_OP
+#ifdef FUSION_DEQUANT_BN_RELU_OP
 REGISTER_FUSION_MATCHER(fusion_dequant_bn_relu,
                         ops::FusionDequantBNReluMatcher);
 #ifdef PADDLE_MOBILE_CPU
 REGISTER_OPERATOR_CPU(fusion_dequant_bn_relu, ops::FusionDequantBNReluOp);
-#endif
-#endif
+#endif  // PADDLE_MOBILE_CPU
+#endif  // FUSION_DEQUANT_BN_RELU_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <string>
#include <vector>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"
#include "operators/kernel/dequant_bn_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
#if defined(FUSION_DEQUANT_BN_OP) || defined(FUSION_DEQUANT_BN_RELU_OP)
class FusionDequantBNMatcher : public framework::FusionOpMatcher {
public:
FusionDequantBNMatcher() {
node_ = framework::Node(G_OP_TYPE_DEQUANTIZE);
node_ > std::make_shared<framework::Node>(G_OP_TYPE_BATCHNORM);
}
virtual void FolderNodes(
framework::Node *node,
std::vector<std::shared_ptr<framework::Node>> *removed_nodes) {
node->Folder(node_.Depth(), Type(),
{{G_OP_TYPE_BATCHNORM,
{{"Scale", "BNScale"},
{"Mean", "BNMean"},
{"Bias", "BNBias"},
{"Variance", "BNVariance"},
{"Y", "Out"}}}},
removed_nodes);
}
std::string Type() override { return G_OP_TYPE_FUSION_DEQUANT_BN; }
};
#endif // FUSION_DEQUANT_BN_OP || FUSION_DEQUANT_BN_RELU_OP
#ifdef FUSION_DEQUANT_BN_OP
template <typename DeviceType, typename T>
class FusionDequantBNOp : public framework::OperatorWithKernel<
DeviceType, FusionDequantBNParam<DeviceType>,
operators::FusionDequantBNKernel<DeviceType, T>> {
public:
FusionDequantBNOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<
DeviceType, FusionDequantBNParam<DeviceType>,
operators::FusionDequantBNKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
// inference output shape
void InferShape() const override;
};
#endif // FUSION_DEQUANT_BN_OP
#ifdef FUSION_DEQUANT_BN_RELU_OP
class FusionDequantBNReluMatcher : public FusionDequantBNMatcher {
public:
FusionDequantBNReluMatcher() : FusionDequantBNMatcher() {
node_ > std::make_shared<framework::Node>(G_OP_TYPE_RELU);
}
virtual std::string Type() { return G_OP_TYPE_FUSION_DEQUANT_BN_RELU; }
};
template <typename DeviceType, typename T>
class FusionDequantBNReluOp
: public framework::OperatorWithKernel<
DeviceType, FusionDequantBNParam<DeviceType>,
operators::FusionDequantBNReluKernel<DeviceType, T>> {
public:
FusionDequantBNReluOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<
DeviceType, FusionDequantBNParam<DeviceType>,
operators::FusionDequantBNReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
void InferShape() const override;
};
#endif // FUSION_DEQUANT_BN_RELU_OP
} // namespace operators
} // namespace paddle_mobile
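FusionDequantBNReluMatcher above reuses the base dequantize -> batch_norm pattern and only appends a relu node in its constructor. A toy sketch of that chaining idea; the PatternNode type below is a stand-in, not framework::Node (whose real operator> takes a shared_ptr<Node>):

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Minimal stand-in just to show the `>` chaining idea: each `>` appends a
// successor op type to the matched subgraph pattern.
struct PatternNode {
  explicit PatternNode(std::string op) { ops.push_back(std::move(op)); }
  PatternNode &operator>(const std::string &op) {
    ops.push_back(op);
    return *this;
  }
  std::vector<std::string> ops;
};

int main() {
  PatternNode dequant_bn("dequantize");
  dequant_bn > "batch_norm";                 // base matcher: dequantize -> batch_norm
  PatternNode dequant_bn_relu = dequant_bn;  // relu variant extends the base pattern
  dequant_bn_relu > "relu";
  for (const auto &op : dequant_bn_relu.ops) std::printf("%s -> ", op.c_str());
  std::printf("match\n");
  return 0;
}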
@@ -42,7 +42,8 @@ class FusionDequantBNReluMatcher : public framework::FusionOpMatcher {
                   {{"Scale", "BNScale"},
                    {"Mean", "BNMean"},
                    {"Bias", "BNBias"},
-                   {"Variance", "BNVariance"}}}},
+                   {"Variance", "BNVariance"},
+                   {"Y", "Out"}}}},
                 removed_nodes);
   }
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_FC_INT8_OP
#pragma once
#include <string>
#include <vector>
#include "framework/operator.h"
#include "framework/program/program-optimize/fusion_op_register.h"
#include "operators/kernel/fusion_fc_kernel.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class FusionFcInt8Op
: public framework::OperatorWithKernel<DeviceType,
FusionFcParam<DeviceType>,
FusionFcKernel<DeviceType, T>> {
public:
FusionFcInt8Op(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs,
const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType, FusionFcParam<DeviceType>,
FusionFcKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
void InferShape() const override;
};
} // namespace operators
} // namespace paddle_mobile
#endif // FUSION_FC_INT8_OP
@@ -32,20 +32,6 @@ void ConvAddReluKernel<CPU, float>::Compute(
 }
 template class ConvAddReluKernel<CPU, float>;

-#ifdef FUSION_CONVADDRELU_INT8_OP
-template <>
-bool ConvAddReluKernel<CPU, int8_t>::Init(FusionConvAddReluParam<CPU> *param) {
-  return true;
-}
-
-template <>
-void ConvAddReluKernel<CPU, int8_t>::Compute(
-    const FusionConvAddReluParam<CPU> &param) {
-  ConvAddReluCompute<int8_t, int32_t>(param);
-}
-template class ConvAddReluKernel<CPU, int8_t>;
-#endif
-
 }  // namespace operators
 }  // namespace paddle_mobile
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef FUSION_DEQUANT_ADD_BN_OP
#include "operators/kernel/dequant_add_bn_kernel.h"
#include <cmath>
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
template <>
bool FusionDequantAddBNKernel<CPU, float>::Init(
FusionDequantAddBNParam<CPU> *param) {
// elementwise add params
const Tensor *bias = param->bias_;
// batch norm params
const Tensor *bn_mean = param->bn_mean_;
const Tensor *bn_variance = param->bn_variance_;
Tensor *bn_scale = param->bn_scale_;
Tensor *bn_bias = param->bn_bias_;
const float epsilon = param->epsilon_;
const float *bias_ptr = bias->data<float>();
const float *mean_ptr = bn_mean->data<float>();
const float *var_ptr = bn_variance->data<float>();
float *bn_scale_ptr = bn_scale->mutable_data<float>();
float *bn_bias_ptr = bn_bias->mutable_data<float>();
for (int c = 0; c < bn_scale->numel(); ++c) {
float inv_scale = bn_scale_ptr[c] / (std::sqrt(var_ptr[c] + epsilon));
bn_scale_ptr[c] = inv_scale;
bn_bias_ptr[c] = inv_scale * (bias_ptr[c] - mean_ptr[c]) + bn_bias_ptr[c];
}
return true;
}
template <>
void FusionDequantAddBNKernel<CPU, float>::Compute(
const FusionDequantAddBNParam<CPU> &param) {
const int32_t *input = param.input_->data<int32_t>();
const float *bn_scale = param.bn_scale_->data<float>();
const float *bn_bias = param.bn_bias_->data<float>();
// dequantize params
const float activation_scale = param.activation_scale_->data<float>()[0];
const float weight_scale = param.weight_scale_;
const float dequant_scale = activation_scale / weight_scale;
float *output = param.output_->mutable_data<float>();
int batch_size = param.input_->dims()[0];
int channels = param.input_->dims()[1];
size_t spatial_size = param.input_->dims()[2] * param.input_->dims()[3];
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < batch_size; ++batch) {
for (int c = 0; c < channels; ++c) {
float scale = bn_scale[c] * dequant_scale;
float bias = bn_bias[c];
size_t offset = (batch * channels + c) * spatial_size;
const int32_t *x = input + offset;
float *y = output + offset;
size_t remain = spatial_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
int loop = spatial_size >> 4;
remain = spatial_size & 0xF;
float32x4_t __scale = vdupq_n_f32(scale);
float32x4_t __bias = vdupq_n_f32(bias);
for (int k = 0; k < loop; ++k, x += 16, y += 16) {
int32x4_t r0 = vld1q_s32(x);
int32x4_t r1 = vld1q_s32(x + 4);
int32x4_t r2 = vld1q_s32(x + 8);
int32x4_t r3 = vld1q_s32(x + 12);
float32x4_t f0 = vcvtq_f32_s32(r0);
float32x4_t f1 = vcvtq_f32_s32(r1);
float32x4_t f2 = vcvtq_f32_s32(r2);
float32x4_t f3 = vcvtq_f32_s32(r3);
f0 = vmlaq_f32(__bias, __scale, f0);
f1 = vmlaq_f32(__bias, __scale, f1);
f2 = vmlaq_f32(__bias, __scale, f2);
f3 = vmlaq_f32(__bias, __scale, f3);
vst1q_f32(y, f0);
vst1q_f32(y + 4, f1);
vst1q_f32(y + 8, f2);
vst1q_f32(y + 12, f3);
}
#endif // __ARM_NEON__
for (int k = 0; k < remain; ++k) {
y[k] = scale * x[k] + bias;
}
}
}
}
} // namespace operators
} // namespace paddle_mobile
#endif // FUSION_DEQUANT_ADD_BN_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "operators/kernel/dequant_bn_relu_kernel.h"
#include <cmath>
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
#if defined(FUSION_DEQUANT_BN_RELU_OP) || defined(FUSION_DEQUANT_ADD_BN_RELU_OP)
void DequantBNReluCompute(const FusionDequantBNParam<CPU> *param) {
const int32_t *input = param->input_->data<int32_t>();
const float *bn_scale = param->bn_scale_->data<float>();
const float *bn_bias = param->bn_bias_->data<float>();
// dequantize params
const float activation_scale = param->activation_scale_->data<float>()[0];
const float weight_scale = param->weight_scale_;
const float dequant_scale = activation_scale / weight_scale;
float *output = param->output_->mutable_data<float>();
int batch_size = param->input_->dims()[0];
int channels = param->input_->dims()[1];
size_t spatial_size = param->input_->dims()[2] * param->input_->dims()[3];
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < batch_size; ++batch) {
for (int c = 0; c < channels; ++c) {
float scale = bn_scale[c] * dequant_scale;
float bias = bn_bias[c];
size_t offset = (batch * channels + c) * spatial_size;
const int32_t *x = input + offset;
float *y = output + offset;
size_t remain = spatial_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
int loop = spatial_size >> 4;
remain = spatial_size & 0xF;
float32x4_t __scale = vdupq_n_f32(scale);
float32x4_t __bias = vdupq_n_f32(bias);
float32x4_t __zero = vdupq_n_f32(0.f);
for (int k = 0; k < loop; ++k, x += 16, y += 16) {
int32x4_t r0 = vld1q_s32(x);
int32x4_t r1 = vld1q_s32(x + 4);
int32x4_t r2 = vld1q_s32(x + 8);
int32x4_t r3 = vld1q_s32(x + 12);
float32x4_t f0 = vcvtq_f32_s32(r0);
float32x4_t f1 = vcvtq_f32_s32(r1);
float32x4_t f2 = vcvtq_f32_s32(r2);
float32x4_t f3 = vcvtq_f32_s32(r3);
f0 = vmlaq_f32(__bias, __scale, f0);
f1 = vmlaq_f32(__bias, __scale, f1);
f2 = vmlaq_f32(__bias, __scale, f2);
f3 = vmlaq_f32(__bias, __scale, f3);
f0 = vmaxq_f32(__zero, f0);
f1 = vmaxq_f32(__zero, f1);
f2 = vmaxq_f32(__zero, f2);
f3 = vmaxq_f32(__zero, f3);
vst1q_f32(y, f0);
vst1q_f32(y + 4, f1);
vst1q_f32(y + 8, f2);
vst1q_f32(y + 12, f3);
}
#endif // __ARM_NEON__
for (int k = 0; k < remain; ++k) {
y[k] = std::max(scale * x[k] + bias, 0.f);
}
}
}
}
#endif
#ifdef FUSION_DEQUANT_BN_RELU_OP
template <>
bool FusionDequantBNReluKernel<CPU, float>::Init(
FusionDequantBNReluParam<CPU> *param) {
// batch norm params
const Tensor *bn_mean = param->bn_mean_;
const Tensor *bn_variance = param->bn_variance_;
Tensor *bn_scale = param->bn_scale_;
Tensor *bn_bias = param->bn_bias_;
const float epsilon = param->epsilon_;
const float *mean_ptr = bn_mean->data<float>();
const float *var_ptr = bn_variance->data<float>();
float *bn_scale_ptr = bn_scale->mutable_data<float>();
float *bn_bias_ptr = bn_bias->mutable_data<float>();
for (int c = 0; c < bn_scale->numel(); ++c) {
float inv_scale = bn_scale_ptr[c] / (std::sqrt(var_ptr[c] + epsilon));
bn_scale_ptr[c] = inv_scale;
bn_bias_ptr[c] = bn_bias_ptr[c] - inv_scale * mean_ptr[c];
}
return true;
}
template <>
void FusionDequantBNReluKernel<CPU, float>::Compute(
const FusionDequantBNReluParam<CPU> &param) {
DequantBNReluCompute(&param);
}
#endif // FUSION_DEQUANT_BN_RELU_OP
#ifdef FUSION_DEQUANT_ADD_BN_RELU_OP
template <>
bool FusionDequantAddBNReluKernel<CPU, float>::Init(
FusionDequantAddBNReluParam<CPU> *param) {
// elementwise add params
const Tensor *bias = param->bias_;
// batch norm params
const Tensor *bn_mean = param->bn_mean_;
const Tensor *bn_variance = param->bn_variance_;
Tensor *bn_scale = param->bn_scale_;
Tensor *bn_bias = param->bn_bias_;
const float epsilon = param->epsilon_;
const float *bias_ptr = bias->data<float>();
const float *mean_ptr = bn_mean->data<float>();
const float *var_ptr = bn_variance->data<float>();
float *bn_scale_ptr = bn_scale->mutable_data<float>();
float *bn_bias_ptr = bn_bias->mutable_data<float>();
for (int c = 0; c < bn_scale->numel(); ++c) {
float inv_scale = bn_scale_ptr[c] / (std::sqrt(var_ptr[c] + epsilon));
bn_scale_ptr[c] = inv_scale;
bn_bias_ptr[c] = inv_scale * (bias_ptr[c] - mean_ptr[c]) + bn_bias_ptr[c];
}
return true;
}
template <>
void FusionDequantAddBNReluKernel<CPU, float>::Compute(
const FusionDequantAddBNReluParam<CPU> &param) {
DequantBNReluCompute(&param);
}
#endif // FUSION_DEQUANT_ADD_BN_RELU_OP
} // namespace operators
} // namespace paddle_mobile
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cmath>
#include "operators/kernel/dequant_bn_kernel.h"
#include "operators/math/activation.h"
#include "operators/math/quantize.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
#if defined(FUSION_DEQUANT_BN_OP) || defined(FUSION_DEQUANT_ADD_BN_OP) || \
defined(FUSION_DEQUANT_BN_RELU_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_OP) || \
defined(FUSION_DEQUANT_ADD_BN_QUANT_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP)
void PublicFusionDequantBNInitParam(FusionDequantBNParam<CPU> *param,
const framework::Tensor *bias) {
// batch norm params
const Tensor *bn_mean = param->bn_mean_;
const Tensor *bn_variance = param->bn_variance_;
Tensor *bn_scale = param->bn_scale_;
Tensor *bn_bias = param->bn_bias_;
const float epsilon = param->epsilon_;
const float *mean_ptr = bn_mean->data<float>();
const float *var_ptr = bn_variance->data<float>();
float *bn_scale_ptr = bn_scale->mutable_data<float>();
float *bn_bias_ptr = bn_bias->mutable_data<float>();
for (int c = 0; c < bn_scale->numel(); ++c) {
float inv_scale = 1.f / (std::sqrt(var_ptr[c] + epsilon));
float val = bias ? bias->data<float>()[c] : 0;
bn_bias_ptr[c] =
inv_scale * bn_scale_ptr[c] * (val - mean_ptr[c]) + bn_bias_ptr[c];
bn_scale_ptr[c] = inv_scale * bn_scale_ptr[c];
}
}
#endif
#if defined(FUSION_DEQUANT_BN_OP) || defined(FUSION_DEQUANT_ADD_BN_OP) || \
defined(FUSION_DEQUANT_BN_RELU_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_OP)
template <ActivationType Act>
void DequantBNCompute(const FusionDequantBNParam<CPU> *param) {
const int32_t *input = param->input_->data<int32_t>();
const float *bn_scale = param->bn_scale_->data<float>();
const float *bn_bias = param->bn_bias_->data<float>();
// dequantize params
const float activation_scale = param->activation_scale_->data<float>()[0];
const float weight_scale = param->weight_scale_;
const float dequant_scale = activation_scale / weight_scale;
float *output = param->output_->mutable_data<float>();
int batch_size = param->input_->dims()[0];
int channels = param->input_->dims()[1];
size_t spatial_size = param->input_->dims()[2] * param->input_->dims()[3];
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < batch_size; ++batch) {
for (int c = 0; c < channels; ++c) {
// not fuse bn and dequant scale to minimize precision difference
// float scale = bn_scale[c] * dequant_scale;
float scale = bn_scale[c];
float bias = bn_bias[c];
size_t offset = (batch * channels + c) * spatial_size;
const int32_t *x = input + offset;
float *y = output + offset;
size_t remain = spatial_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
int loop = spatial_size >> 4;
remain = spatial_size & 0xF;
float32x4_t __dequant_scale = vdupq_n_f32(dequant_scale);
float32x4_t __scale = vdupq_n_f32(scale);
float32x4_t __bias = vdupq_n_f32(bias);
for (int k = 0; k < loop; ++k, x += 16, y += 16) {
int32x4_t r0 = vld1q_s32(x);
int32x4_t r1 = vld1q_s32(x + 4);
int32x4_t r2 = vld1q_s32(x + 8);
int32x4_t r3 = vld1q_s32(x + 12);
float32x4_t f0 = vcvtq_f32_s32(r0);
float32x4_t f1 = vcvtq_f32_s32(r1);
float32x4_t f2 = vcvtq_f32_s32(r2);
float32x4_t f3 = vcvtq_f32_s32(r3);
f0 = vmulq_f32(__dequant_scale, f0);
f1 = vmulq_f32(__dequant_scale, f1);
f2 = vmulq_f32(__dequant_scale, f2);
f3 = vmulq_f32(__dequant_scale, f3);
f0 = vmlaq_f32(__bias, __scale, f0);
f1 = vmlaq_f32(__bias, __scale, f1);
f2 = vmlaq_f32(__bias, __scale, f2);
f3 = vmlaq_f32(__bias, __scale, f3);
f0 = math::vActiveq_f32<Act>(f0);
f1 = math::vActiveq_f32<Act>(f1);
f2 = math::vActiveq_f32<Act>(f2);
f3 = math::vActiveq_f32<Act>(f3);
vst1q_f32(y, f0);
vst1q_f32(y + 4, f1);
vst1q_f32(y + 8, f2);
vst1q_f32(y + 12, f3);
}
#endif // __ARM_NEON__
for (int k = 0; k < remain; ++k) {
y[k] = math::Active<Act>(scale * (dequant_scale * x[k]) + bias);
}
}
}
}
#endif
#ifdef FUSION_DEQUANT_BN_OP
template <>
bool FusionDequantBNKernel<CPU, float>::Init(FusionDequantBNParam<CPU> *param) {
PublicFusionDequantBNInitParam(param, nullptr);
return true;
}
template <>
void FusionDequantBNKernel<CPU, float>::Compute(
const FusionDequantBNParam<CPU> &param) {
DequantBNCompute<IDENTITY>(&param);
}
#endif // FUSION_DEQUANT_BN_OP
#ifdef FUSION_DEQUANT_BN_RELU_OP
template <>
bool FusionDequantBNReluKernel<CPU, float>::Init(
FusionDequantBNParam<CPU> *param) {
PublicFusionDequantBNInitParam(param, nullptr);
return true;
}
template <>
void FusionDequantBNReluKernel<CPU, float>::Compute(
const FusionDequantBNParam<CPU> &param) {
DequantBNCompute<RELU>(&param);
}
#endif // FUSION_DEQUANT_BN_RELU_OP
#ifdef FUSION_DEQUANT_ADD_BN_OP
template <>
bool FusionDequantAddBNKernel<CPU, float>::Init(
FusionDequantAddBNParam<CPU> *param) {
const framework::Tensor *bias = param->bias_;
PublicFusionDequantBNInitParam(param, bias);
return true;
}
template <>
void FusionDequantAddBNKernel<CPU, float>::Compute(
const FusionDequantAddBNParam<CPU> &param) {
DequantBNCompute<IDENTITY>(&param);
}
#endif // FUSION_DEQUANT_ADD_BN_OP
#ifdef FUSION_DEQUANT_ADD_BN_RELU_OP
template <>
bool FusionDequantAddBNReluKernel<CPU, float>::Init(
FusionDequantAddBNParam<CPU> *param) {
const framework::Tensor *bias = param->bias_;
PublicFusionDequantBNInitParam(param, bias);
return true;
}
template <>
void FusionDequantAddBNReluKernel<CPU, float>::Compute(
const FusionDequantAddBNParam<CPU> &param) {
DequantBNCompute<RELU>(&param);
}
#endif // FUSION_DEQUANT_ADD_BN_RELU_OP
#if defined(FUSION_DEQUANT_ADD_BN_QUANT_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP)
template <ActivationType Act, RoundType R>
void DequantBNQuantCompute(const FusionDequantAddBNQuantParam<CPU> *param) {
const int32_t *input = param->input_->data<int32_t>();
const float *bn_scale = param->bn_scale_->data<float>();
const float *bn_bias = param->bn_bias_->data<float>();
// dequantize params
const float activation_scale = param->activation_scale_->data<float>()[0];
const float weight_scale = param->weight_scale_;
const float dequant_scale = activation_scale / weight_scale;
// quantize params
Tensor *output_scale = param->online_scale_;
float max_abs = 0.f;
int8_t *output = param->output_->mutable_data<int8_t>();
int batch_size = param->input_->dims()[0];
int channels = param->input_->dims()[1];
size_t spatial_size = param->input_->dims()[2] * param->input_->dims()[3];
// if (param->is_static_) {
if (true) {
max_abs = param->static_scale_;
float quant_scale = 127.f / max_abs;
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < batch_size; ++batch) {
for (int c = 0; c < channels; ++c) {
// not fuse bn and dequant scale to minimize precision difference
// float scale = bn_scale[c] * dequant_scale;
float scale = bn_scale[c];
float bias = bn_bias[c];
size_t offset = (batch * channels + c) * spatial_size;
const int32_t *x = input + offset;
int8_t *y = output + offset;
size_t remain = spatial_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
int loop = spatial_size >> 4;
remain = spatial_size & 0xF;
float32x4_t __dequant_scale = vdupq_n_f32(dequant_scale);
float32x4_t __scale = vdupq_n_f32(scale);
float32x4_t __bias = vdupq_n_f32(bias);
float32x4_t __quant_scale = vdupq_n_f32(quant_scale);
for (int k = 0; k < loop; ++k, x += 16, y += 16) {
int32x4_t r0 = vld1q_s32(x);
int32x4_t r1 = vld1q_s32(x + 4);
int32x4_t r2 = vld1q_s32(x + 8);
int32x4_t r3 = vld1q_s32(x + 12);
float32x4_t f0 = vcvtq_f32_s32(r0);
float32x4_t f1 = vcvtq_f32_s32(r1);
float32x4_t f2 = vcvtq_f32_s32(r2);
float32x4_t f3 = vcvtq_f32_s32(r3);
f0 = vmulq_f32(__dequant_scale, f0);
f1 = vmulq_f32(__dequant_scale, f1);
f2 = vmulq_f32(__dequant_scale, f2);
f3 = vmulq_f32(__dequant_scale, f3);
f0 = vmlaq_f32(__bias, __scale, f0);
f1 = vmlaq_f32(__bias, __scale, f1);
f2 = vmlaq_f32(__bias, __scale, f2);
f3 = vmlaq_f32(__bias, __scale, f3);
f0 = math::vActiveq_f32<Act>(f0);
f1 = math::vActiveq_f32<Act>(f1);
f2 = math::vActiveq_f32<Act>(f2);
f3 = math::vActiveq_f32<Act>(f3);
f0 = vmulq_f32(__quant_scale, f0);
f1 = vmulq_f32(__quant_scale, f1);
f2 = vmulq_f32(__quant_scale, f2);
f3 = vmulq_f32(__quant_scale, f3);
int32x4_t q0 = math::vRoundq_f32<R>(f0);
int32x4_t q1 = math::vRoundq_f32<R>(f1);
int32x4_t q2 = math::vRoundq_f32<R>(f2);
int32x4_t q3 = math::vRoundq_f32<R>(f3);
int16x4_t d0 = vmovn_s32(q0);
int16x4_t d1 = vmovn_s32(q1);
int16x4_t d2 = vmovn_s32(q2);
int16x4_t d3 = vmovn_s32(q3);
int16x8_t q5 = vcombine_s16(d0, d1);
int16x8_t q6 = vcombine_s16(d2, d3);
int8x8_t d5 = vmovn_s16(q5);
int8x8_t d6 = vmovn_s16(q6);
vst1_s8(y, d5);
vst1_s8(y + 8, d6);
}
#endif // __ARM_NEON__
for (int k = 0; k < remain; ++k) {
float x_temp =
math::Active<Act>(scale * (dequant_scale * x[k]) + bias);
y[k] = math::Round<R>(x_temp * quant_scale);
}
}
}
} else {
// TODO(hjchen2)
max_abs = std::max(max_abs, 1e-6f);
}
param->online_scale_->mutable_data<float>()[0] = max_abs;
}
template <>
bool FusionDequantAddBNQuantKernel<CPU, float>::Init(
FusionDequantAddBNQuantParam<CPU> *param) {
const framework::Tensor *bias = param->bias_;
PublicFusionDequantBNInitParam(param, bias);
return true;
}
template <>
void FusionDequantAddBNQuantKernel<CPU, float>::Compute(
const FusionDequantAddBNQuantParam<CPU> &param) {
switch (param.round_type_) {
case ROUND_NEAREST_TO_EVEN:
DequantBNQuantCompute<IDENTITY, ROUND_NEAREST_TO_EVEN>(&param);
break;
case ROUND_NEAREST_TOWARDS_ZERO:
DequantBNQuantCompute<IDENTITY, ROUND_NEAREST_TOWARDS_ZERO>(&param);
break;
case ROUND_NEAREST_AWAY_ZERO:
DequantBNQuantCompute<IDENTITY, ROUND_NEAREST_AWAY_ZERO>(&param);
break;
default:
LOG(kLOG_ERROR) << "round type is not supported.";
break;
}
}
#endif // FUSION_DEQUANT_ADD_BN_QUANT_OP
#ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
template <>
bool FusionDequantAddBNReluQuantKernel<CPU, float>::Init(
FusionDequantAddBNQuantParam<CPU> *param) {
const framework::Tensor *bias = param->bias_;
PublicFusionDequantBNInitParam(param, bias);
return true;
}
template <>
void FusionDequantAddBNReluQuantKernel<CPU, float>::Compute(
const FusionDequantAddBNQuantParam<CPU> &param) {
switch (param.round_type_) {
case ROUND_NEAREST_TO_EVEN:
DequantBNQuantCompute<RELU, ROUND_NEAREST_TO_EVEN>(&param);
break;
case ROUND_NEAREST_TOWARDS_ZERO:
DequantBNQuantCompute<RELU, ROUND_NEAREST_TOWARDS_ZERO>(&param);
break;
case ROUND_NEAREST_AWAY_ZERO:
DequantBNQuantCompute<RELU, ROUND_NEAREST_AWAY_ZERO>(&param);
break;
default:
LOG(kLOG_ERROR) << "round type is not supported.";
break;
}
}
#endif // FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
} // namespace operators
} // namespace paddle_mobile
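For reference, the per-element arithmetic that DequantBNQuantCompute vectorizes above can be written as a scalar sketch. The function below is illustrative only (its name and signature are not part of this patch); it assumes, as the Init functions suggest, that the elementwise-add bias has already been folded into bn_bias, takes dequant_scale = activation_scale / weight_scale and quant_scale = 127 / max_abs exactly as in the kernel, and fixes one of the three supported rounding modes (nearest, away from zero).

#include <algorithm>
#include <cmath>
#include <cstdint>

// Scalar sketch of the fused dequantize + batch-norm + (relu) + requantize
// step for a single int32 GEMM accumulator.
inline int8_t DequantBNQuantRef(int32_t acc, float dequant_scale,
                                float bn_scale, float bn_bias,
                                float quant_scale, bool relu) {
  float x = dequant_scale * static_cast<float>(acc);  // dequantize
  x = bn_scale * x + bn_bias;                         // batch norm (+ folded bias)
  if (relu) x = std::max(x, 0.f);                     // optional activation
  // requantize; like the kernel, this assumes max_abs bounds |x|, so no clamp
  return static_cast<int8_t>(std::round(x * quant_scale));
}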
...@@ -16,6 +16,7 @@ limitations under the License. */ ...@@ -16,6 +16,7 @@ limitations under the License. */
#include "operators/kernel/quantize_kernel.h" #include "operators/kernel/quantize_kernel.h"
#include <cmath> #include <cmath>
#include "operators/math/quantize.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON) #if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h> #include <arm_neon.h>
...@@ -32,81 +33,68 @@ inline float32_t vmaxvq_f32(float32x4_t r) { ...@@ -32,81 +33,68 @@ inline float32_t vmaxvq_f32(float32x4_t r) {
} }
#endif #endif
template <RoundType R = ROUND_NEAREST_TOWARDS_ZERO> template <RoundType R>
inline int32x4_t vround_f32(float32x4_t r) { inline void QuantizeOffline(const Tensor *input, const float scale,
return vcvtq_s32_f32(r); const float max_abs, Tensor *output) {
} const float *x = input->data<const float>();
int8_t *y = output->mutable_data<int8_t>();
template <> size_t remain = input->numel();
inline int32x4_t vround_f32<ROUND_NEAREST_AWAY_ZERO>(float32x4_t r) { #if defined(__ARM_NEON__) || defined(__ARM_NEON)
float32x4_t plus = vdupq_n_f32(0.5); size_t loop = remain >> 4;
float32x4_t minus = vdupq_n_f32(-0.5); remain = remain & 0xF;
float32x4_t zero = vdupq_n_f32(0); float32x4_t __scale = vdupq_n_f32(scale);
uint32x4_t more_than_zero = vcgtq_f32(r, zero); float32x4_t __postive_max = vdupq_n_f32(max_abs);
float32x4_t temp = vbslq_f32(more_than_zero, plus, minus); float32x4_t __negtive_max = vdupq_n_f32(-max_abs);
temp = vaddq_f32(r, temp); #pragma omp parallel for
int32x4_t ret = vcvtq_s32_f32(temp); for (size_t i = 0; i < loop; ++i) {
return ret; const float *local_x = x + (i << 4);
} int8_t *local_y = y + (i << 4);
float32x4_t r0 = vld1q_f32(local_x);
template <> float32x4_t r1 = vld1q_f32(local_x + 4);
inline int32x4_t vround_f32<ROUND_NEAREST_TO_EVEN>(float32x4_t r) { float32x4_t r2 = vld1q_f32(local_x + 8);
float32x4_t point5 = vdupq_n_f32(0.5); float32x4_t r3 = vld1q_f32(local_x + 12);
int32x4_t one = vdupq_n_s32(1); r0 = vmaxq_f32(vminq_f32(r0, __postive_max), __negtive_max);
int32x4_t zero = vdupq_n_s32(0); r1 = vmaxq_f32(vminq_f32(r1, __postive_max), __negtive_max);
r2 = vmaxq_f32(vminq_f32(r2, __postive_max), __negtive_max);
int32x4_t rnd = vround_f32<ROUND_NEAREST_AWAY_ZERO>(r); r3 = vmaxq_f32(vminq_f32(r3, __postive_max), __negtive_max);
float32x4_t frnd = vcvtq_f32_s32(rnd); r0 = vmulq_f32(r0, __scale);
frnd = vsubq_f32(frnd, r); r1 = vmulq_f32(r1, __scale);
frnd = vabsq_f32(frnd); r2 = vmulq_f32(r2, __scale);
uint32x4_t equal_point5 = vceqq_f32(frnd, point5); r3 = vmulq_f32(r3, __scale);
int32x4_t abs_rnd = vabsq_s32(rnd); int32x4_t q0 = math::vRoundq_f32<R>(r0);
abs_rnd = vandq_s32(abs_rnd, one); int32x4_t q1 = math::vRoundq_f32<R>(r1);
uint32x4_t not_mod2 = vreinterpretq_u32_s32(abs_rnd); int32x4_t q2 = math::vRoundq_f32<R>(r2);
uint32x4_t mask = vandq_u32(equal_point5, not_mod2); int32x4_t q3 = math::vRoundq_f32<R>(r3);
uint32x4_t more_than_zero = vcgtq_s32(rnd, zero); int16x4_t d0 = vmovn_s32(q0);
more_than_zero = vandq_u32(more_than_zero, vreinterpretq_u32_s32(one)); int16x4_t d1 = vmovn_s32(q1);
mask = veorq_u32(more_than_zero, mask); int16x4_t d2 = vmovn_s32(q2);
more_than_zero = veorq_u32(more_than_zero, vreinterpretq_u32_s32(one)); int16x4_t d3 = vmovn_s32(q3);
mask = vaddq_u32(more_than_zero, mask); int16x8_t q5 = vcombine_s16(d0, d1);
int32x4_t smask = vreinterpretq_s32_u32(mask); int16x8_t q6 = vcombine_s16(d2, d3);
smask = vsubq_s32(smask, one); int8x8_t d5 = vmovn_s16(q5);
rnd = vaddq_s32(rnd, smask); int8x8_t d6 = vmovn_s16(q6);
return rnd; vst1_s8(local_y, d5);
} vst1_s8(local_y + 8, d6);
#endif
template <RoundType R = ROUND_NEAREST_TOWARDS_ZERO>
inline int8_t Round(const float &x) {
return static_cast<int8_t>(x);
}
template <>
inline int8_t Round<ROUND_NEAREST_AWAY_ZERO>(const float &x) {
return std::round(x);
}
template <>
inline int8_t Round<ROUND_NEAREST_TO_EVEN>(const float &x) {
float v = std::round(x);
int32_t q = static_cast<int32_t>(v);
if (std::abs(std::abs(q - v) - 0.5) <= 0) {
if (std::abs(q) % 2 != 0) {
q = q + ((q > 0) ? -1 : 1);
} }
x += (loop << 4);
y += (loop << 4);
#endif
for (size_t i = 0; i < remain; ++i) {
float x_temp = std::max(std::min(x[i], max_abs), -max_abs);
y[i] = math::Round<R>(x_temp * scale);
} }
return static_cast<int8_t>(q);
} }
template <RoundType R> template <RoundType R>
static void Quantize(const Tensor *input, const float scale, Tensor *output) { inline void QuantizeOnline(const Tensor *input, const float scale,
Tensor *output) {
const float *x = input->data<const float>(); const float *x = input->data<const float>();
int8_t *y = output->mutable_data<int8_t>(); int8_t *y = output->mutable_data<int8_t>();
size_t remain = input->numel(); size_t remain = input->numel();
#if defined(__ARM_NEON__) || defined(__ARM_NEON) #if defined(__ARM_NEON__) || defined(__ARM_NEON)
size_t loop = remain >> 4; size_t loop = remain >> 4;
remain = remain & 0xF; remain = remain & 0xF;
float32x4_t __scale = vdupq_n_f32(scale);
#pragma omp parallel for #pragma omp parallel for
for (size_t i = 0; i < loop; ++i) { for (size_t i = 0; i < loop; ++i) {
const float *local_x = x + (i << 4); const float *local_x = x + (i << 4);
...@@ -115,14 +103,14 @@ static void Quantize(const Tensor *input, const float scale, Tensor *output) { ...@@ -115,14 +103,14 @@ static void Quantize(const Tensor *input, const float scale, Tensor *output) {
float32x4_t r1 = vld1q_f32(local_x + 4); float32x4_t r1 = vld1q_f32(local_x + 4);
float32x4_t r2 = vld1q_f32(local_x + 8); float32x4_t r2 = vld1q_f32(local_x + 8);
float32x4_t r3 = vld1q_f32(local_x + 12); float32x4_t r3 = vld1q_f32(local_x + 12);
r0 = vmulq_n_f32(r0, scale); r0 = vmulq_f32(r0, __scale);
r1 = vmulq_n_f32(r1, scale); r1 = vmulq_f32(r1, __scale);
r2 = vmulq_n_f32(r2, scale); r2 = vmulq_f32(r2, __scale);
r3 = vmulq_n_f32(r3, scale); r3 = vmulq_f32(r3, __scale);
int32x4_t q0 = vround_f32<R>(r0); int32x4_t q0 = math::vRoundq_f32<R>(r0);
int32x4_t q1 = vround_f32<R>(r1); int32x4_t q1 = math::vRoundq_f32<R>(r1);
int32x4_t q2 = vround_f32<R>(r2); int32x4_t q2 = math::vRoundq_f32<R>(r2);
int32x4_t q3 = vround_f32<R>(r3); int32x4_t q3 = math::vRoundq_f32<R>(r3);
int16x4_t d0 = vmovn_s32(q0); int16x4_t d0 = vmovn_s32(q0);
int16x4_t d1 = vmovn_s32(q1); int16x4_t d1 = vmovn_s32(q1);
int16x4_t d2 = vmovn_s32(q2); int16x4_t d2 = vmovn_s32(q2);
...@@ -138,7 +126,18 @@ static void Quantize(const Tensor *input, const float scale, Tensor *output) { ...@@ -138,7 +126,18 @@ static void Quantize(const Tensor *input, const float scale, Tensor *output) {
y += (loop << 4); y += (loop << 4);
#endif #endif
for (size_t i = 0; i < remain; ++i) { for (size_t i = 0; i < remain; ++i) {
y[i] = Round<R>(x[i] * scale); y[i] = math::Round<R>(x[i] * scale);
}
}
template <RoundType R>
static void Quantize(const Tensor *input, const float max_abs,
const bool offline, Tensor *output) {
float scale = 127.f / max_abs;
if (offline) {
QuantizeOffline<R>(input, scale, max_abs, output);
} else {
QuantizeOnline<R>(input, scale, output);
} }
} }
...@@ -173,6 +172,13 @@ float find_abs_max(const Tensor *input) { ...@@ -173,6 +172,13 @@ float find_abs_max(const Tensor *input) {
return max_abs; return max_abs;
} }
} // namespace operators
} // namespace paddle_mobile
#endif // __ARM_NEON__
namespace paddle_mobile {
namespace operators {
template <> template <>
bool QuantizeKernel<CPU, float>::Init(QuantizeParam<CPU> *param) { bool QuantizeKernel<CPU, float>::Init(QuantizeParam<CPU> *param) {
return true; return true;
...@@ -184,24 +190,23 @@ void QuantizeKernel<CPU, float>::Compute(const QuantizeParam<CPU> &param) { ...@@ -184,24 +190,23 @@ void QuantizeKernel<CPU, float>::Compute(const QuantizeParam<CPU> &param) {
Tensor *output = param.output_; Tensor *output = param.output_;
Tensor *output_scale = param.online_scale_; Tensor *output_scale = param.online_scale_;
float max_abs = 0.f; float max_abs = 0.f;
if (param.is_static_) { if (param.offline_) {
max_abs = param.static_scale_; max_abs = param.offline_scale_->data<float>()[0];
} else { } else {
max_abs = find_abs_max(input); max_abs = find_abs_max(input);
} }
max_abs = std::max(max_abs, 1e-6f); max_abs = std::max(max_abs, 1e-6f);
// only support int8 currently
float scale = 127 / max_abs;
param.online_scale_->mutable_data<float>()[0] = max_abs; param.online_scale_->mutable_data<float>()[0] = max_abs;
switch (param.round_type_) { switch (param.round_type_) {
case ROUND_NEAREST_TO_EVEN: case ROUND_NEAREST_TO_EVEN:
Quantize<ROUND_NEAREST_TO_EVEN>(input, scale, output); Quantize<ROUND_NEAREST_TO_EVEN>(input, max_abs, param.offline_, output);
break; break;
case ROUND_NEAREST_TOWARDS_ZERO: case ROUND_NEAREST_TOWARDS_ZERO:
Quantize<ROUND_NEAREST_TOWARDS_ZERO>(input, scale, output); Quantize<ROUND_NEAREST_TOWARDS_ZERO>(input, max_abs, param.offline_,
output);
break; break;
case ROUND_NEAREST_AWAY_ZERO: case ROUND_NEAREST_AWAY_ZERO:
Quantize<ROUND_NEAREST_AWAY_ZERO>(input, scale, output); Quantize<ROUND_NEAREST_AWAY_ZERO>(input, max_abs, param.offline_, output);
break; break;
default: default:
LOG(kLOG_ERROR) << "round type is not supported."; LOG(kLOG_ERROR) << "round type is not supported.";
...@@ -212,4 +217,4 @@ void QuantizeKernel<CPU, float>::Compute(const QuantizeParam<CPU> &param) { ...@@ -212,4 +217,4 @@ void QuantizeKernel<CPU, float>::Compute(const QuantizeParam<CPU> &param) {
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
#endif #endif // QUANT_OP
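The offline/online split above differs only in whether the input is clamped to the precomputed range before scaling. A minimal scalar sketch of both paths follows; the names are illustrative, the real kernels also vectorize the loop with NEON and select the rounding mode through math::Round<R>, while this sketch uses nearest-away-from-zero.

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

// offline == true mirrors QuantizeOffline (clamp to [-max_abs, max_abs]
// first); offline == false mirrors QuantizeOnline.
inline void QuantizeRef(const float *x, size_t n, float max_abs, bool offline,
                        int8_t *y) {
  const float scale = 127.f / max_abs;
  for (size_t i = 0; i < n; ++i) {
    float v = x[i];
    if (offline) v = std::max(std::min(v, max_abs), -max_abs);
    y[i] = static_cast<int8_t>(std::round(v * scale));
  }
}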
...@@ -15,11 +15,56 @@ limitations under the License. */ ...@@ -15,11 +15,56 @@ limitations under the License. */
#ifdef RELU_OP #ifdef RELU_OP
#include "operators/kernel/relu_kernel.h" #include "operators/kernel/relu_kernel.h"
#include "operators/kernel/central-arm-func/relu_arm_func.h" #include "common/types.h"
#include "operators/math/activation.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <typename Dtype, ActivationType Act>
struct ReluCompute {
void operator()(const Tensor *input, Tensor *output) {}
};
template <ActivationType Act>
struct ReluCompute<float, Act> {
void operator()(const Tensor *input, Tensor *output) {
const float *x = input->data<float>();
float *y = output->mutable_data<float>();
size_t remain = input->numel();
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
size_t loop = remain >> 4;
remain = remain & 0xF;
#pragma omp parallel for
for (size_t i = 0; i < loop; ++i) {
const float *local_x = x + (i << 4);
float *local_y = y + (i << 4);
float32x4_t r0 = vld1q_f32(local_x);
float32x4_t r1 = vld1q_f32(local_x + 4);
float32x4_t r2 = vld1q_f32(local_x + 8);
float32x4_t r3 = vld1q_f32(local_x + 12);
r0 = math::vActiveq_f32<Act>(r0);
r1 = math::vActiveq_f32<Act>(r1);
r2 = math::vActiveq_f32<Act>(r2);
r3 = math::vActiveq_f32<Act>(r3);
vst1q_f32(local_y, r0);
vst1q_f32(local_y + 4, r1);
vst1q_f32(local_y + 8, r2);
vst1q_f32(local_y + 12, r3);
}
x += (loop << 4);
y += (loop << 4);
#endif
for (size_t i = 0; i < remain; ++i) {
y[i] = math::Active<Act>(x[i]);
}
}
};
template <> template <>
bool ReluKernel<CPU, float>::Init(ReluParam<CPU> *param) { bool ReluKernel<CPU, float>::Init(ReluParam<CPU> *param) {
return true; return true;
...@@ -27,7 +72,21 @@ bool ReluKernel<CPU, float>::Init(ReluParam<CPU> *param) { ...@@ -27,7 +72,21 @@ bool ReluKernel<CPU, float>::Init(ReluParam<CPU> *param) {
template <> template <>
void ReluKernel<CPU, float>::Compute(const ReluParam<CPU> &param) { void ReluKernel<CPU, float>::Compute(const ReluParam<CPU> &param) {
ReluCompute<float>(param); const Tensor *input = param.InputX();
Tensor *output = param.Out();
ReluCompute<float, RELU>()(input, output);
}
template <>
bool Relu6Kernel<CPU, float>::Init(ReluParam<CPU> *param) {
return true;
}
template <>
void Relu6Kernel<CPU, float>::Compute(const ReluParam<CPU> &param) {
const Tensor *input = param.InputX();
Tensor *output = param.Out();
ReluCompute<float, RELU6>()(input, output);
} }
} // namespace operators } // namespace operators
......
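All of the NEON loops in this file process 16 floats per iteration and fall back to a scalar tail, using numel >> 4 for the number of full blocks and numel & 0xF for the leftover. A tiny worked example of that split (the value 100 is arbitrary):

#include <cstddef>
#include <cstdio>

int main() {
  size_t numel = 100;
  size_t loop = numel >> 4;     // 6 iterations -> 96 elements on the NEON path
  size_t remain = numel & 0xF;  // 4 elements handled by the scalar tail
  std::printf("loop=%zu remain=%zu\n", loop, remain);
  return 0;
}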
...@@ -11,14 +11,111 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,14 +11,111 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#ifdef TRANSPOSE2_OP #ifdef TRANSPOSE2_OP
#include "operators/kernel/transpose2_kernel.h" #include "operators/kernel/transpose2_kernel.h"
#include "operators/kernel/central-arm-func/transpose2_arm_func.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
bool IsShuffleChannel(const std::vector<int> &axis) {
bool is_shuffle_channel = true;
if (axis.size() > 2 && axis[0] == 0 && axis[1] == 2 && axis[2] == 1) {
for (int i = 3; i < axis.size(); ++i) {
if (axis[i] != i) {
is_shuffle_channel = false;
break;
}
}
} else {
return false;
}
return is_shuffle_channel;
}
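A standalone sketch of the predicate above, with a few assertions to show which permutations take the shuffle-channel fast path (the helper name is made up; it simply mirrors IsShuffleChannel):

#include <cassert>
#include <vector>

static bool IsShuffleChannelRef(const std::vector<int> &axis) {
  if (axis.size() <= 2 || axis[0] != 0 || axis[1] != 2 || axis[2] != 1) {
    return false;
  }
  for (int i = 3; i < static_cast<int>(axis.size()); ++i) {
    if (axis[i] != i) return false;
  }
  return true;
}

int main() {
  assert(IsShuffleChannelRef({0, 2, 1}));         // swap the two group dims
  assert(IsShuffleChannelRef({0, 2, 1, 3}));      // typical NCHW shuffle_channel
  assert(!IsShuffleChannelRef({0, 2, 3, 1}));     // needs the general transpose
  assert(!IsShuffleChannelRef({0, 2, 1, 4, 3}));  // trailing dims must stay put
  return 0;
}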
template <typename Dtype>
void ShuffleChannelCompute(const Transpose2Param<CPU> &param) {
const std::vector<int> &axis = param.Axis();
const Tensor *input = param.InputX();
const Dtype *input_ptr = input->data<Dtype>();
Tensor *output = param.Out();
Dtype *output_ptr = output->mutable_data<Dtype>();
// the input and output must have between 2 and 6 dimensions.
const framework::DDim &in_dim = input->dims();
const framework::DDim &out_dim = output->dims();
size_t offset = 1;
for (int i = 3; i < axis.size(); ++i) {
offset *= in_dim[i];
}
#pragma omp parallel for collapse(3)
for (int batch = 0; batch < out_dim[0]; ++batch) {
for (int c1 = 0; c1 < out_dim[1]; ++c1) {
for (int c2 = 0; c2 < out_dim[2]; ++c2) {
size_t out_offset =
((batch * out_dim[1] + c1) * out_dim[2] + c2) * offset;
size_t in_offset = ((batch * in_dim[1] + c2) * in_dim[2] + c1) * offset;
memcpy(output_ptr + out_offset, input_ptr + in_offset,
offset * sizeof(Dtype));
}
}
}
}
template <typename Dtype>
void Transpose2Compute(const Transpose2Param<CPU> &param) {
const std::vector<int> &axis = param.Axis();
const Tensor *input = param.InputX();
const Dtype *input_ptr = input->data<Dtype>();
Tensor *output = param.Out();
Dtype *output_ptr = output->mutable_data<Dtype>();
// the input and output must have between 2 and 6 dimensions.
const framework::DDim &in_dim = input->dims();
const framework::DDim &out_dim = output->dims();
// precompute inverted output dim and strides
size_t rout_dim[6], strides[6];
int permute = axis.size();  // permute must be >= 2 && <= 6.
for (int i = 0; i < permute; ++i) {
int k = permute - 1 - i;
strides[k] = 1;
for (int j = axis[i] + 1; j < permute; ++j) {
strides[k] *= in_dim[j];
}
rout_dim[k] = out_dim[i];
}
// unroll the first 2 dimensions
int reamin_dim = 1;
for (int i = 2; i < out_dim.size(); ++i) {
reamin_dim *= out_dim[i];
}
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < out_dim[0]; ++batch) {
for (int j = 0; j < out_dim[1]; ++j) {
size_t offset = batch * strides[permute - 1] + j * strides[permute - 2];
Dtype *out_ptr = output_ptr + (batch * out_dim[1] + j) * reamin_dim;
int indics[4] = {0, 0, 0, 0};
for (int k = 0; k < reamin_dim; ++k) {
out_ptr[k] = input_ptr[offset];
indics[0] += 1;
offset += strides[0];
for (int p = 0; p < permute - 3; ++p) {
if (indics[p] == rout_dim[p]) {
indics[p + 1] += 1;
indics[p] = 0;
offset += strides[p + 1];
offset -= rout_dim[p] * strides[p];
} else {
break;
}
}
}
}
}
}
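The inverted strides and rout_dim arrays are the core of the general transpose above. Below is a small standalone sketch that reproduces only that precomputation, for a hypothetical input of shape {2, 3, 4, 5} transposed with axis = {0, 3, 1, 2} (output shape {2, 5, 3, 4}); the shapes are made up for illustration.

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> in_dim = {2, 3, 4, 5};
  std::vector<int> axis = {0, 3, 1, 2};
  std::vector<int> out_dim = {2, 5, 3, 4};
  int permute = static_cast<int>(axis.size());
  std::vector<size_t> strides(permute), rout_dim(permute);
  for (int i = 0; i < permute; ++i) {
    int k = permute - 1 - i;
    strides[k] = 1;
    for (int j = axis[i] + 1; j < permute; ++j) strides[k] *= in_dim[j];
    rout_dim[k] = out_dim[i];
  }
  // prints "strides: 5 20 1 60 | rout_dim: 4 3 5 2"
  std::printf("strides:");
  for (int k = 0; k < permute; ++k) std::printf(" %zu", strides[k]);
  std::printf(" | rout_dim:");
  for (int k = 0; k < permute; ++k) std::printf(" %zu", rout_dim[k]);
  std::printf("\n");
  return 0;
}

Here strides[0] is the input step between consecutive elements of the innermost output dimension, while strides[permute - 1] and strides[permute - 2] give the offsets contributed by the two unrolled outer loops.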
template <> template <>
bool Transpose2Kernel<CPU, float>::Init(Transpose2Param<CPU> *param) { bool Transpose2Kernel<CPU, float>::Init(Transpose2Param<CPU> *param) {
return true; return true;
...@@ -26,10 +123,24 @@ bool Transpose2Kernel<CPU, float>::Init(Transpose2Param<CPU> *param) { ...@@ -26,10 +123,24 @@ bool Transpose2Kernel<CPU, float>::Init(Transpose2Param<CPU> *param) {
template <> template <>
void Transpose2Kernel<CPU, float>::Compute(const Transpose2Param<CPU> &param) { void Transpose2Kernel<CPU, float>::Compute(const Transpose2Param<CPU> &param) {
const std::vector<int> &axis = param.Axis();
bool shuffle_channel = IsShuffleChannel(axis);
if (shuffle_channel) {
if (param.InputX()->type() == typeid(int8_t)) {
ShuffleChannelCompute<int8_t>(param);
} else {
ShuffleChannelCompute<float>(param);
}
} else {
if (param.InputX()->type() == typeid(int8_t)) {
Transpose2Compute<int8_t>(param);
} else {
Transpose2Compute<float>(param); Transpose2Compute<float>(param);
}
}
} }
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
#endif #endif // TRANSPOSE2_OP
...@@ -25,6 +25,7 @@ limitations under the License. */ ...@@ -25,6 +25,7 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
void ConvAddBasic(const FusionConvAddParam<CPU> &param) { void ConvAddBasic(const FusionConvAddParam<CPU> &param) {
const Tensor *input = param.Input(); const Tensor *input = param.Input();
Tensor filter = *param.Filter(); Tensor filter = *param.Filter();
...@@ -106,7 +107,7 @@ void ConvAddBasic(const FusionConvAddParam<CPU> &param) { ...@@ -106,7 +107,7 @@ void ConvAddBasic(const FusionConvAddParam<CPU> &param) {
// gemm // gemm
Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul<float>(filter_slice, false, col_matrix, false, math::matmul<float, float>(filter_slice, false, col_matrix, false,
static_cast<float>(1), &out_slice, static_cast<float>(1), &out_slice,
static_cast<float>(1), false, biase_data); static_cast<float>(1), false, biase_data);
} }
......
...@@ -25,24 +25,18 @@ limitations under the License. */ ...@@ -25,24 +25,18 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <typename P, typename S> template <typename Itype, typename Otype>
void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) { void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) {
const Tensor *input = param.Input(); const Tensor *input = param.Input();
Tensor filter = *param.Filter(); Tensor filter = *param.Filter();
Tensor bias = *param.Bias(); Tensor bias = *param.Bias();
int32_t axis = param.Axis(); int32_t axis = param.Axis();
S *bias_data = bias.data<S>(); Otype *bias_data = bias.data<Otype>();
Tensor *output = param.Output(); Tensor *output = param.Output();
output->mutable_data<P>(); output->mutable_data<Otype>();
float alpha = 1.0f; float alpha = 1.0f;
float beta = 1.0f; float beta = 1.0f;
#ifdef FUSION_CONVADDRELU_INT8_OP
alpha = param.InputScale()->data<float>()[0];
beta = 0.0f;
#endif
int32_t groups = param.Groups(); int32_t groups = param.Groups();
std::vector<int32_t> strides = param.Strides(); std::vector<int32_t> strides = param.Strides();
std::vector<int32_t> paddings = param.Paddings(); std::vector<int32_t> paddings = param.Paddings();
...@@ -70,7 +64,7 @@ void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) { ...@@ -70,7 +64,7 @@ void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) {
Tensor col; Tensor col;
Tensor col_matrix; Tensor col_matrix;
if (is_expand) { if (is_expand) {
col.mutable_data<P>(col_shape); col.mutable_data<Itype>(col_shape);
col_matrix.ShareDataWith(col); col_matrix.ShareDataWith(col);
col_matrix.Resize(col_matrix_shape); col_matrix.Resize(col_matrix_shape);
} }
...@@ -89,8 +83,8 @@ void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) { ...@@ -89,8 +83,8 @@ void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) {
int32_t in_step = static_cast<int32_t>(input->dims()[1]) / groups; int32_t in_step = static_cast<int32_t>(input->dims()[1]) / groups;
int32_t out_step = static_cast<int32_t>(output->dims()[1]) / groups; int32_t out_step = static_cast<int32_t>(output->dims()[1]) / groups;
math::Vol2ColFunctor<CPU, P> vol2col; math::Vol2ColFunctor<CPU, Itype> vol2col;
math::Im2ColFunctor<math::ColFormat::kCFO, CPU, P> im2col; math::Im2ColFunctor<math::ColFormat::kCFO, CPU, Itype> im2col;
for (int32_t i = 0; i < batch_size; i++) { for (int32_t i = 0; i < batch_size; i++) {
Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape); Tensor in_batch = input->Slice(i, i + 1).Resize(input_shape);
...@@ -118,8 +112,8 @@ void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) { ...@@ -118,8 +112,8 @@ void ConvAddReluCompute(const FusionConvAddReluParam<CPU> &param) {
Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul(filter_slice, false, col_matrix, false, alpha, &out_slice, math::matmul<Itype, Otype>(filter_slice, false, col_matrix, false, alpha,
beta, true, bias_data); &out_slice, beta, true, bias_data);
} }
} }
} }
......
...@@ -106,9 +106,10 @@ inline void GemmConv(const ConvParam<CPU> &param) { ...@@ -106,9 +106,10 @@ inline void GemmConv(const ConvParam<CPU> &param) {
// gemm // gemm
Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step); Tensor out_slice = out_batch.Slice(g * out_step, (g + 1) * out_step);
Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step); Tensor filter_slice = filter.Slice(g * out_step, (g + 1) * out_step);
math::matmul(filter_slice, false, col_matrix, false, math::matmul<Itype, Otype>(filter_slice, false, col_matrix, false,
static_cast<float>(1), &out_slice, static_cast<float>(0), static_cast<float>(1), &out_slice,
false, static_cast<Otype *>(nullptr)); static_cast<float>(0), false,
static_cast<Otype *>(nullptr));
} }
} }
} }
......
...@@ -93,8 +93,8 @@ void ConvTransposeCompute(const ConvTransposeParam<CPU> &param) { ...@@ -93,8 +93,8 @@ void ConvTransposeCompute(const ConvTransposeParam<CPU> &param) {
Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step); Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step);
Tensor out_slice = output_batch.Slice(g * out_step, (g + 1) * out_step); Tensor out_slice = output_batch.Slice(g * out_step, (g + 1) * out_step);
math::matmul(filter_slice, true, in_slice, false, static_cast<P>(1.0), math::matmul<P, P>(filter_slice, true, in_slice, false,
&col_matrix, static_cast<P>(0.0)); static_cast<P>(1.0), &col_matrix, static_cast<P>(0.0));
if (data_dim == 2U) { if (data_dim == 2U) {
col2im(col, dilations, strides, col2im(col, dilations, strides,
std::vector<int>{paddings[0], paddings[1], paddings[0], std::vector<int>{paddings[0], paddings[1], paddings[0],
......
...@@ -23,20 +23,15 @@ limitations under the License. */ ...@@ -23,20 +23,15 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
template <typename P, typename S> template <typename Itype, typename Otype>
void FusionFcCompute(const FusionFcParam<CPU> &param) { void FusionFcCompute(const FusionFcParam<CPU> &param) {
const Tensor *input_x = param.InputX(); const Tensor *input_x = param.InputX();
const Tensor *input_y = param.InputY(); const Tensor *input_y = param.InputY();
Tensor *input_z = param.InputZ(); Tensor *input_z = param.InputZ();
S *input_z_data = input_z->data<S>(); Otype *input_z_data = input_z->data<Otype>();
int axis = param.Axis(); int axis = param.Axis();
Tensor *out = param.Out(); Tensor *out = param.Out();
// int m = out->dims()[0]; auto *out_data = out->mutable_data<Itype>();
// int n = out->dims()[1];
auto *out_data = out->mutable_data<P>();
float alpha = 1.0f;
float beta = 1.0f;
const Tensor x_matrix = const Tensor x_matrix =
input_x->dims().size() > 2 input_x->dims().size() > 2
...@@ -57,28 +52,14 @@ void FusionFcCompute(const FusionFcParam<CPU> &param) { ...@@ -57,28 +52,14 @@ void FusionFcCompute(const FusionFcParam<CPU> &param) {
axis = (axis == -1 ? out_dim.size() - input_z->dims().size() : axis); axis = (axis == -1 ? out_dim.size() - input_z->dims().size() : axis);
PADDLE_MOBILE_ENFORCE(axis == 1, " to fit broadcast, axis = 1. "); PADDLE_MOBILE_ENFORCE(axis == 1, " to fit broadcast, axis = 1. ");
if (std::is_same<P, int8_t>::value) {
#ifdef FUSION_FC_INT8_OP
alpha = param.InputScale()->data<float>()[0];
beta = 0.0f;
math::matmul(x_matrix, false, y_matrix, false, alpha, out, beta, false,
input_z_data, true);
#endif
} else {
// bias_data has the same size as the second dimension of out // bias_data has the same size as the second dimension of out
int64_t classes = input_z->numel(); int64_t classes = input_z->numel();
for (int i = 0; i < out_dim[0]; i++) { for (int i = 0; i < out_dim[0]; i++) {
memory::Copy(out_data + i * classes, input_z_data, memory::Copy(out_data + i * classes, input_z_data, sizeof(Otype) * classes);
sizeof(float) * classes);
} }
math::matmul<Itype, Otype>(x_matrix, false, y_matrix, false,
math::matmul<float>(x_matrix, false, y_matrix, false, alpha, out, beta, static_cast<float>(1), out, static_cast<float>(1),
false); false);
}
PADDLE_MOBILE_ENFORCE(out_dim.size() == 2, " out_dim.size must be 2.");
// if (out_dim.size() != 2) {
// out->Resize(out_dim);
// }
} }
} // namespace operators } // namespace operators
......
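The fused FC above first broadcasts the bias tensor across the rows of the output (the enforce checks require axis == 1, which only lines up when input_z has as many elements as the output has columns), then lets math::matmul accumulate X * Y on top with beta = 1. A scalar sketch of the same semantics, with illustrative names and raw pointers instead of Tensors:

#include <cstring>

// X is m x k, Y is k x n, bias has n elements, out is m x n.
void FusionFcRef(const float *x, const float *y, const float *bias, int m,
                 int k, int n, float *out) {
  for (int i = 0; i < m; ++i) {
    std::memcpy(out + i * n, bias, sizeof(float) * n);  // broadcast the bias
  }
  for (int i = 0; i < m; ++i) {
    for (int j = 0; j < n; ++j) {
      float acc = out[i * n + j];  // beta = 1: start from the copied bias
      for (int p = 0; p < k; ++p) acc += x[i * k + p] * y[p * n + j];
      out[i * n + j] = acc;
    }
  }
}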
...@@ -73,14 +73,14 @@ void MulCompute(const MulParam<CPU> &param) { ...@@ -73,14 +73,14 @@ void MulCompute(const MulParam<CPU> &param) {
} }
if (param.InputX()->type() == typeid(int8_t)) { if (param.InputX()->type() == typeid(int8_t)) {
out->mutable_data<int32_t>(); out->mutable_data<int32_t>();
math::matmul<float, int32_t>(x_matrix, false, y_matrix, false, math::matmul<int8_t, int32_t>(x_matrix, false, y_matrix, false,
static_cast<float>(1), out, static_cast<float>(1), out,
static_cast<float>(0)); static_cast<float>(0));
} else { } else {
out->mutable_data<float>(); out->mutable_data<float>();
math::matmul<float>(x_matrix, false, y_matrix, false, static_cast<float>(1), math::matmul<float, float>(x_matrix, false, y_matrix, false,
out, static_cast<float>(0)); static_cast<float>(1), out,
static_cast<float>(0));
} }
if (out_dim.size() != 2) { if (out_dim.size() != 2) {
out->Resize(out_dim); out->Resize(out_dim);
......
...@@ -17,103 +17,53 @@ limitations under the License. */ ...@@ -17,103 +17,53 @@ limitations under the License. */
#include <string> #include <string>
#include <vector> #include <vector>
#include "common/types.h"
#include "operators/math/pooling.h" #include "operators/math/pooling.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using framework::Tensor;
template <typename T, typename S>
void PoolBasic(std::string pooling_type, std::vector<int> ksize,
std::vector<int> strides, std::vector<int> paddings,
const Tensor *in_x, Tensor *out) {
if (pooling_type == "max") {
math::PoolFunctor<CPU, math::MaxPool<T>, T> pool2d_forward;
math::MaxPool<T> pool_process;
pool2d_forward(*in_x, ksize, strides, paddings, pool_process, out);
} else if (pooling_type == "avg") {
math::PoolFunctor<CPU, math::AvgPool<T, S>, T> pool2d_forward;
math::AvgPool<T, S> pool_process;
pool2d_forward(*in_x, ksize, strides, paddings, pool_process, out);
}
}
template <typename P> template <typename P>
void PoolCompute(const PoolParam<CPU> &param) { void PoolCompute(const PoolParam<CPU> &param) {
const Tensor *in_x = param.Input(); const framework::Tensor *input = param.Input();
Tensor *out = param.Output(); framework::Tensor *output = param.Output();
std::string pooling_type = param.PoolingType(); const std::string &pooling_type = param.PoolingType();
std::vector<int> ksize = param.Ksize(); std::vector<int> ksize = param.Ksize();
std::vector<int> strides = param.Strides(); std::vector<int> strides = param.Strides();
std::vector<int> paddings = param.Paddings(); std::vector<int> paddings = param.Paddings();
if (ksize.size() != 2) {
LOG(paddle_mobile::LogLevel::kLOG_ERROR)
<< "Pool op only supports 2D and 3D input.";
}
if (param.isGlobalPooling()) { if (param.isGlobalPooling()) {
for (size_t i = 0; i < ksize.size(); ++i) { for (size_t i = 0; i < ksize.size(); ++i) {
paddings[i] = 0; paddings[i] = 0;
ksize[i] = static_cast<int>(in_x->dims()[i + 2]); ksize[i] = static_cast<int>(input->dims()[i + 2]);
} }
} }
if (in_x->type() == typeid(int8_t)) { if (ksize[0] == 3 && ksize[0] == ksize[1]) {
if (pooling_type == "max" && ksize[0] == 3 && ksize[0] == ksize[1]) { if (pooling_type == "max" && strides[0] == strides[1]) {
if (strides[0] == strides[1] && strides[0] == 1) { if (strides[0] == 1) {
math::Pool3x3Maxs1_int8(in_x, out, paddings[0], paddings[1]); math::Pooling3x3<MAX, 1>()(*input, paddings, output);
} else if (strides[0] == strides[1] && strides[0] == 2) { } else if (strides[0] == 2) {
math::Pool3x3Maxs2_int8(in_x, out, paddings[0], paddings[1]); math::Pooling3x3<MAX, 2>()(*input, paddings, output);
} else { } else {
math::Pool3x3Max_int8(strides, paddings, in_x, out); math::Pooling<MAX>()(*input, ksize, strides, paddings, output);
} }
} else if (pooling_type == "avg" && strides[0] == strides[1]) {
if (strides[0] == 1) {
math::Pooling3x3<AVG, 1>()(*input, paddings, output);
} else if (strides[0] == 2) {
math::Pooling3x3<AVG, 2>()(*input, paddings, output);
} else { } else {
PoolBasic<int8_t, int32_t>(pooling_type, ksize, strides, paddings, in_x, math::Pooling<AVG>()(*input, ksize, strides, paddings, output);
out);
} }
} else { } else {
if (ksize[0] == 3 && ksize[0] == ksize[1]) { // Others
if (pooling_type == "max") {
if (strides[0] == strides[1] && strides[0] == 1 &&
paddings[0] == paddings[1] && paddings[1] == 1) {
math::Pool3x3Maxs1p1(in_x, out);
} else {
math::Pool3x3Max(strides, paddings, in_x, out);
} }
} else if (pooling_type == "avg") {
if (strides[0] == strides[1] && strides[0] == 1 &&
paddings[0] == paddings[1] && paddings[1] == 1) {
math::Pool3x3Avgs1p1(in_x, out);
} else { } else {
math::Pool3x3Avg(strides, paddings, in_x, out);
}
}
} else if (ksize[0] == 2 && ksize[0] == ksize[1] && strides[0] == 2 &&
strides[0] == strides[1] && paddings[0] == paddings[1] &&
paddings[1] == 0) {
#if __ARM_NEON
#if __aarch64__
PoolBasic<float, float>(pooling_type, ksize, strides, paddings, in_x,
out);
#else
/// todo: fix bug in Pool2x2
if (pooling_type == "max") { if (pooling_type == "max") {
math::Pool2x2Maxs2p0(strides, paddings, in_x, out); math::Pooling<MAX>()(*input, ksize, strides, paddings, output);
} else if (pooling_type == "avg") { } else if (pooling_type == "avg") {
math::Pool2x2Avgs2p0(strides, paddings, in_x, out); math::Pooling<AVG>()(*input, ksize, strides, paddings, output);
}
#endif
#else
PoolBasic<float, float>(pooling_type, ksize, strides, paddings, in_x,
out);
#endif // __ARM_NEON
} else { } else {
PoolBasic<float, float>(pooling_type, ksize, strides, paddings, in_x, // Others
out);
} }
} }
} }
......
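After this refactor the pooling kernel picks math::Pooling3x3<MAX|AVG, 1|2> whenever the window is 3x3 with equal strides of 1 or 2, and falls back to the generic math::Pooling<MAX|AVG> functor otherwise; global pooling simply stretches the window to the whole spatial extent and zeroes the paddings before that dispatch. A tiny worked example of the global-pooling adjustment (the NCHW shape {1, 32, 7, 7} and the starting ksize/paddings are made up):

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> input_dims = {1, 32, 7, 7};  // N, C, H, W
  std::vector<int> ksize = {2, 2};
  std::vector<int> paddings = {1, 1};
  for (size_t i = 0; i < ksize.size(); ++i) {
    paddings[i] = 0;
    ksize[i] = input_dims[i + 2];
  }
  // prints "ksize = {7, 7}, paddings = {0, 0}": one output value per channel
  std::printf("ksize = {%d, %d}, paddings = {%d, %d}\n", ksize[0], ksize[1],
              paddings[0], paddings[1]);
  return 0;
}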
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef RELU_OP
#pragma once
#include <operators/math/transform.h>
#include "operators/op_param.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
template <typename T>
struct ReluFunctor {
inline T operator()(T in) const { return in > 0 ? in : 0; }
};
/*
* @b Platform-specific implementation; param is passed in from the op layer
* */
template <typename P>
void ReluCompute(const ReluParam<CPU> &param) {
const auto *input_x = param.InputX();
auto *input_x_ptr = input_x->data<float>();
auto *out = param.Out();
auto *out_ptr = out->mutable_data<float>();
int numel = input_x->numel();
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#if __aarch64__
if (numel > 0) {
int loop = numel >> 0x4;
int remain = numel & 0xF;
float32x4_t zero = vdupq_n_f32(0.f);
for (int i = 0; i < loop; ++i) {
float32x4_t r0 = vld1q_f32(input_x_ptr);
float32x4_t r1 = vld1q_f32(input_x_ptr + 4);
float32x4_t r2 = vld1q_f32(input_x_ptr + 8);
float32x4_t r3 = vld1q_f32(input_x_ptr + 12);
r0 = vmaxq_f32(r0, zero);
r1 = vmaxq_f32(r1, zero);
r2 = vmaxq_f32(r2, zero);
r3 = vmaxq_f32(r3, zero);
vst1q_f32(out_ptr, r0);
vst1q_f32(out_ptr + 4, r1);
vst1q_f32(out_ptr + 8, r2);
vst1q_f32(out_ptr + 12, r3);
input_x_ptr += 16;
out_ptr += 16;
}
for (int i = 0; i < remain; ++i) {
out_ptr[i] = (input_x_ptr[i] > 0) * input_x_ptr[i];
}
#else
if (numel > 64) {
asm volatile(
"pld [%[input_x_ptr], #0] \n\t"
"vmov.f32 q8, #0.0 \n\t"
"subs %[num], %[num], #32 \n\t"
"blt end_num_%= \n\t"
"loop_num_%=: \n\t"
"pld [%[input_x_ptr], #1024] \n\t"
"vld1.32 {q0, q1}, [%[input_x_ptr]]! \n\t"
"vld1.32 {q2, q3}, [%[input_x_ptr]]! \n\t"
"vld1.32 {q4, q5}, [%[input_x_ptr]]! \n\t"
"vld1.32 {q6, q7}, [%[input_x_ptr]]! \n\t"
"vmax.f32 q0, q0, q8 \n\t"
"vmax.f32 q1, q1, q8 \n\t"
"vmax.f32 q2, q2, q8 \n\t"
"vmax.f32 q3, q3, q8 \n\t"
"vmax.f32 q4, q4, q8 \n\t"
"vmax.f32 q5, q5, q8 \n\t"
"vmax.f32 q6, q6, q8 \n\t"
"vmax.f32 q7, q7, q8 \n\t"
"vst1.32 {q0, q1}, [%[out_ptr]]! \n\t"
"vst1.32 {q2, q3}, [%[out_ptr]]! \n\t"
"vst1.32 {q4, q5}, [%[out_ptr]]! \n\t"
"vst1.32 {q6, q7}, [%[out_ptr]]! \n\t"
"subs %[num], %[num], #32 \n\t"
"bge loop_num_%= \n\t"
"end_num_%=: \n\t"
"cmp %[num], #0 \n\t"
"bge end_%= \n\t"
"mov r6, #4 \n\t"
"mul r5, %[num], r6 \n\t"
"add %[input_x_ptr], %[input_x_ptr], r5 \n\t"
"vld1.32 {q0, q1}, [%[input_x_ptr]]! \n\t"
"vld1.32 {q2, q3}, [%[input_x_ptr]]! \n\t"
"vld1.32 {q4, q5}, [%[input_x_ptr]]! \n\t"
"vld1.32 {q6, q7}, [%[input_x_ptr]]! \n\t"
"vmax.f32 q0, q0, q8 \n\t"
"vmax.f32 q1, q1, q8 \n\t"
"vmax.f32 q2, q2, q8 \n\t"
"vmax.f32 q3, q3, q8 \n\t"
"vmax.f32 q4, q4, q8 \n\t"
"vmax.f32 q5, q5, q8 \n\t"
"vmax.f32 q6, q6, q8 \n\t"
"vmax.f32 q7, q7, q8 \n\t"
"add %[out_ptr], %[out_ptr], r5 \n\t"
"vst1.32 {q0, q1}, [%[out_ptr]]! \n\t"
"vst1.32 {q2, q3}, [%[out_ptr]]! \n\t"
"vst1.32 {q4, q5}, [%[out_ptr]]! \n\t"
"vst1.32 {q6, q7}, [%[out_ptr]]! \n\t"
"end_%=: \n\t"
:
:
[out_ptr] "r"(out_ptr), [input_x_ptr] "r"(input_x_ptr), [num] "r"(numel)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "r5",
"r6");
#endif
} else {
#endif
ReluFunctor<float> func_;
math::Transform trans;
trans(input_x_ptr, input_x_ptr + numel, out_ptr, func_);
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
}
#endif
}
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef TRANSPOSE2_OP
#pragma once
#include <vector>
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
template <typename P>
void Transpose2Compute(const Transpose2Param<CPU>& param) {
const auto* input_x = param.InputX();
const auto input_x_dims = input_x->dims();
auto* out = param.Out();
const auto axis = param.Axis();
const auto* input_x_data = input_x->data<float>();
auto* out_data = out->mutable_data<float>();
size_t ndim = axis.size();
std::vector<int> xdim(ndim);
std::vector<int> xstride(ndim);
std::vector<int> xout(ndim);
for (int i = 0; i < ndim; i++) {
int j = ndim - 1 - i;
xdim[j] = input_x_dims[axis[i]];
xstride[j] = 1;
for (int k = axis[i] + 1; k < ndim; k++) {
xstride[j] *= input_x_dims[k];
}
xout[j] = xstride[j] * xdim[j];
}
auto numel = input_x->numel();
size_t pind = 0;
std::vector<int> ind(ndim);
for (int i = 0; i < numel; i++) {
out_data[i] = input_x_data[pind];
ind[0]++;
pind += xstride[0];
for (int j = 0; j < ndim - 1; j++) {
if (ind[j] == xdim[j]) {
ind[j + 1]++;
ind[j] = 0;
pind += xstride[j + 1];
pind -= xout[j];
} else {
break;
}
}
}
}
} // namespace operators
} // namespace paddle_mobile
#endif
...@@ -21,23 +21,6 @@ limitations under the License. */ ...@@ -21,23 +21,6 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
// vector<int> pos;
// template <typename T>
// void TransposeFunc(const int numel, const T* input, const vector<int> axis,
// const vector<int> old_strides, const vector<int>
// new_strides, T* output) {
// for (int i = 0; i < numel; ++i) {
// int old_idx = 0;
// int idx = i;
// for (int j = 0; j < axis.size(); ++j) {
// int order = axis[j];
// old_idx += (idx / new_strides[j]) * old_strides[order];
// idx %= new_strides[j];
// }
// output[i] = input[old_idx];
// }
// }
template <typename P> template <typename P>
void TransposeCompute(const TransposeParam<CPU>& param) { void TransposeCompute(const TransposeParam<CPU>& param) {
const auto* input_x = param.InputX(); const auto* input_x = param.InputX();
......
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#ifdef FUSION_DEQUANT_ADD_BN_OP
#include "framework/operator.h"
#include "operators/op_param.h"
namespace paddle_mobile {
namespace operators {
template <typename DeviceType, typename T>
class FusionDequantAddBNKernel
: public framework::OpKernelBase<DeviceType,
FusionDequantAddBNParam<DeviceType>> {
public:
void Compute(const FusionDequantAddBNParam<DeviceType> &param);
bool Init(FusionDequantAddBNParam<DeviceType> *param);
};
} // namespace operators
} // namespace paddle_mobile
#endif
...@@ -20,26 +20,37 @@ limitations under the License. */ ...@@ -20,26 +20,37 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
#define DECLARE_KERNEL(KernelClass, KernelParam) \
template <typename DeviceType, typename T> \
class KernelClass \
: public framework::OpKernelBase<DeviceType, KernelParam<DeviceType>> { \
public: \
bool Init(KernelParam<DeviceType> *param); \
void Compute(const KernelParam<DeviceType> &param); \
};
#ifdef FUSION_DEQUANT_BN_OP
DECLARE_KERNEL(FusionDequantBNKernel, FusionDequantBNParam);
#endif
#ifdef FUSION_DEQUANT_BN_RELU_OP #ifdef FUSION_DEQUANT_BN_RELU_OP
template <typename DeviceType, typename T> DECLARE_KERNEL(FusionDequantBNReluKernel, FusionDequantBNParam);
class FusionDequantBNReluKernel #endif
: public framework::OpKernelBase<DeviceType,
FusionDequantBNReluParam<DeviceType>> { #ifdef FUSION_DEQUANT_ADD_BN_OP
public: DECLARE_KERNEL(FusionDequantAddBNKernel, FusionDequantAddBNParam);
void Compute(const FusionDequantBNReluParam<DeviceType> &param);
bool Init(FusionDequantBNReluParam<DeviceType> *param);
};
#endif #endif
#ifdef FUSION_DEQUANT_ADD_BN_RELU_OP #ifdef FUSION_DEQUANT_ADD_BN_RELU_OP
template <typename DeviceType, typename T> DECLARE_KERNEL(FusionDequantAddBNReluKernel, FusionDequantAddBNParam);
class FusionDequantAddBNReluKernel #endif
: public framework::OpKernelBase<DeviceType,
FusionDequantAddBNReluParam<DeviceType>> { #ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
public: DECLARE_KERNEL(FusionDequantAddBNQuantKernel, FusionDequantAddBNQuantParam);
void Compute(const FusionDequantAddBNReluParam<DeviceType> &param); #endif
bool Init(FusionDequantAddBNReluParam<DeviceType> *param);
}; #ifdef FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP
DECLARE_KERNEL(FusionDequantAddBNReluQuantKernel, FusionDequantAddBNQuantParam);
#endif #endif
} // namespace operators } // namespace operators
......
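For readability, this is roughly what a single DECLARE_KERNEL instantiation expands to, shown here for FusionDequantAddBNKernel; it matches the explicit class declarations the macro replaces and relies on the same includes as the surrounding header.

// Expansion of DECLARE_KERNEL(FusionDequantAddBNKernel, FusionDequantAddBNParam)
template <typename DeviceType, typename T>
class FusionDequantAddBNKernel
    : public framework::OpKernelBase<DeviceType,
                                     FusionDequantAddBNParam<DeviceType>> {
 public:
  bool Init(FusionDequantAddBNParam<DeviceType> *param);
  void Compute(const FusionDequantAddBNParam<DeviceType> &param);
};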
...@@ -19,7 +19,7 @@ limitations under the License. */ ...@@ -19,7 +19,7 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using namespace framework;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class FeedKernel class FeedKernel
: public framework::OpKernelBase<DeviceType, FeedParam<DeviceType>> { : public framework::OpKernelBase<DeviceType, FeedParam<DeviceType>> {
......
...@@ -17,7 +17,6 @@ limitations under the License. */ ...@@ -17,7 +17,6 @@ limitations under the License. */
#pragma once #pragma once
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/op_param.h" #include "operators/op_param.h"
namespace paddle_mobile { namespace paddle_mobile {
...@@ -30,6 +29,15 @@ class ReluKernel ...@@ -30,6 +29,15 @@ class ReluKernel
void Compute(const ReluParam<DeviceType>& param); void Compute(const ReluParam<DeviceType>& param);
bool Init(ReluParam<DeviceType>* param); bool Init(ReluParam<DeviceType>* param);
}; };
template <typename DeviceType, typename T>
class Relu6Kernel
: public framework::OpKernelBase<DeviceType, ReluParam<DeviceType>> {
public:
void Compute(const ReluParam<DeviceType>& param);
bool Init(ReluParam<DeviceType>* param);
};
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
......
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"); Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. you may not use this file except in compliance with the License.
...@@ -13,9 +13,17 @@ See the License for the specific language governing permissions and ...@@ -13,9 +13,17 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#pragma once #pragma once
#include <math.h>
#include <algorithm>
#include <cmath>
#include <string> #include <string>
#include "common/enforce.h" #include "common/enforce.h"
#include "common/types.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#include "operators/math/math_func_neon.h"
#endif
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
...@@ -24,68 +32,92 @@ namespace math { ...@@ -24,68 +32,92 @@ namespace math {
#define SIGMOID_THRESHOLD_MAX 13.0 #define SIGMOID_THRESHOLD_MAX 13.0
#define EXP_MAX_INPUT 40.0 #define EXP_MAX_INPUT 40.0
enum ActivationType {
kSigmoid,
kReLU,
kTanh,
kIdentity,
};
inline ActivationType GetActivationType(const std::string &type) { inline ActivationType GetActivationType(const std::string &type) {
if (type == "sigmoid") { if (type == "sigmoid") {
return ActivationType::kSigmoid; return ActivationType::SIGMOID;
} else if (type == "relu") { } else if (type == "relu") {
return ActivationType::kReLU; return ActivationType::RELU;
} else if (type == "tanh") { } else if (type == "tanh") {
return ActivationType::kTanh; return ActivationType::TANH;
} else if (type == "identity" || type == "") { } else if (type == "identity" || type == "") {
return ActivationType::kIdentity; return ActivationType::IDENTITY;
} }
PADDLE_MOBILE_THROW_EXCEPTION("Not support activation type."); PADDLE_MOBILE_THROW_EXCEPTION("Not support activation type.");
} }
namespace forward { #if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <ActivationType Act = IDENTITY>
inline float32x4_t vActiveq_f32(const float32x4_t &x) {
return x;
}
template <typename T> template <>
T Identity(const T a) { inline float32x4_t vActiveq_f32<RELU>(const float32x4_t &x) {
return a; float32x4_t __zero = vdupq_n_f32(0.f);
return vmaxq_f32(x, __zero);
} }
template <typename T> template <>
T Relu(const T a) { inline float32x4_t vActiveq_f32<RELU6>(const float32x4_t &x) {
return a > static_cast<T>(0.0) ? a : static_cast<T>(0.0); float32x4_t __zero = vdupq_n_f32(0.f);
float32x4_t __six = vdupq_n_f32(6.f);
return vminq_f32(vmaxq_f32(x, __zero), __six);
} }
template <typename T> template <>
T Sigmoid(const T a) { inline float32x4_t vActiveq_f32<SIGMOID>(const float32x4_t &x) {
const T min = SIGMOID_THRESHOLD_MIN; float32x4_t __one = vdupq_n_f32(1.f);
const T max = SIGMOID_THRESHOLD_MAX; float32x4_t __x = vnegq_f32(x);
T tmp = (a < min) ? min : ((a > max) ? max : a); __x = exp_ps(__x);
return static_cast<T>(1.0) / (static_cast<T>(1.0) + exp(-tmp)); __x = vaddq_f32(__x, __one);
float32x4_t __out = vrecpeq_f32(__x);
return vmulq_f32(vrecpsq_f32(__x, __out), __out);
} }
template <typename T> template <>
T Tanh(const T a) { inline float32x4_t vActiveq_f32<TANH>(const float32x4_t &x) {
T tmp = -2.0 * a; float32x4_t __one = vdupq_n_f32(1.f);
tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp; float32x4_t __x = vnegq_f32(x);
return (2.0 / (1.0 + exp(tmp))) - 1.0; __x = vmulq_n_f32(__x, 2.f);
__x = exp_ps(__x);
__x = vaddq_f32(__x, __one);
float32x4_t __out = vrecpeq_f32(__x);
__out = vmulq_f32(vrecpsq_f32(__x, __out), __out);
__out = vmulq_n_f32(__out, 2.f);
return vsubq_f32(__out, __one);
} }
#endif
} // namespace forward template <ActivationType Act = IDENTITY>
inline float Active(const float &x) {
return x;
}
template <typename T> template <>
struct Active { inline float Active<RELU>(const float &x) {
typedef T (*Act)(T); return std::max(x, 0.f);
}; }
static Active<float>::Act kActFloat[] = { template <>
&forward::Sigmoid<float>, &forward::Relu<float>, &forward::Tanh<float>, inline float Active<RELU6>(const float &x) {
&forward::Identity<float>}; return std::min(std::max(x, 0.f), 6.f);
}
namespace forward { template <>
inline float activation(float a, int index) { return kActFloat[index](a); } inline float Active<SIGMOID>(const float &x) {
// float tmp = x > SIGMOID_THRESHOLD_MAX ? SIGMOID_THRESHOLD_MAX : x;
// tmp = x > SIGMOID_THRESHOLD_MIN ? x : SIGMOID_THRESHOLD_MIN;
// return 1.f / (1.f + exp(-tmp));
return 1.f / (1.f + exp(-x));
}
} // namespace forward template <>
inline float Active<TANH>(const float &x) {
// float tmp = -2.f * x;
// tmp = (tmp > EXP_MAX_INPUT) ? EXP_MAX_INPUT : tmp;
// return (2.f / (1.f + exp(tmp))) - 1.f;
return 2.f / (1.f + exp(-2.f * x)) - 1.f;
}
} // namespace math } // namespace math
} // namespace operators } // namespace operators
......
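The NEON sigmoid and tanh specializations above avoid a hardware division: vrecpeq_f32 gives a rough estimate of 1/x, and vrecpsq_f32(x, est), which computes 2 - x * est, supplies a Newton-Raphson refinement step. A scalar sketch of that refinement, with made-up starting values, shows it converging toward the exact reciprocal needed for sigmoid(x) = 1 / (1 + exp(-x)):

#include <cmath>
#include <cstdio>

int main() {
  float x = 1.f + std::exp(-0.5f);  // denominator of sigmoid(0.5)
  float est = 0.6f;                 // stand-in for the rough vrecpeq_f32 estimate
  for (int i = 0; i < 3; ++i) {
    est = est * (2.f - x * est);    // one Newton-Raphson step per iteration
  }
  std::printf("refined = %.6f  exact = %.6f\n", est, 1.f / x);
  return 0;
}

The kernel applies a single refinement step on the vector path, while the scalar Active<SIGMOID> and Active<TANH> fall back to exp-based formulas directly.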
...@@ -1260,10 +1260,10 @@ void Gemm::AddDot4x4(int k, const float *a, const float *b, float *c, int ldc) { ...@@ -1260,10 +1260,10 @@ void Gemm::AddDot4x4(int k, const float *a, const float *b, float *c, int ldc) {
"q10", "q11", "q12", "q13"); "q10", "q11", "q12", "q13");
} }
/* void Gemm::VectorKernel(int m, int n, int k, float alpha, const float *A,
void Gemm::VectorKernel(int m, int n, int k, float alpha, const float *A, int int lda, const float *B, int ldb, float beta, float *C,
lda, const float *B, int ldb, float beta, float *C, int ldc, bool relu) { float int ldc, bool relu) {
*bufferC = static_cast<float *>(memory::Alloc(sizeof(float) * n)); float *bufferC = static_cast<float *>(memory::Alloc(sizeof(float) * n));
const float *a0, *b0, *b1, *b2, *b3; const float *a0, *b0, *b1, *b2, *b3;
float *c0, *C0; float *c0, *C0;
...@@ -1482,6 +1482,7 @@ lda, const float *B, int ldb, float beta, float *C, int ldc, bool relu) { float ...@@ -1482,6 +1482,7 @@ lda, const float *B, int ldb, float beta, float *C, int ldc, bool relu) { float
} }
} }
/*
void Gemm::VectorKernelWithBn(int m, int n, int k, float alpha, const float *A, void Gemm::VectorKernelWithBn(int m, int n, int k, float alpha, const float *A,
int lda, const float *B, int ldb, float beta, float *C, int lda, const float *B, int ldb, float beta, float *C,
int ldc, bool relu, float *new_scale, float *new_bias) { int ldc, bool relu, float *new_scale, float *new_bias) {
...@@ -2579,9 +2580,8 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc, ...@@ -2579,9 +2580,8 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
} }
} }
/* // C = A * B
// C = A * B void Gemm::VecWriteBasic(int n, float *c, float *C, int ldc) {
void Gemm::VecWriteBasic(int n, float *c, float *C, int ldc) {
int nc1 = n / 16; int nc1 = n / 16;
int _nc1 = n % 16; int _nc1 = n % 16;
int nc2 = _nc1 / 4; int nc2 = _nc1 / 4;
...@@ -2624,13 +2624,13 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc, ...@@ -2624,13 +2624,13 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
: :
: [C] "r"(C), [c] "r"(c), [nc1] "r"(nc1), [nc2] "r"(nc2), [nc3] "r"(nc3) : [C] "r"(C), [c] "r"(c), [nc1] "r"(nc1), [nc2] "r"(nc2), [nc3] "r"(nc3)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5"); : "memory", "q0", "q1", "q2", "q3", "q4", "q5");
} }
// C = alpha * A * B + beta * C // C = alpha * A * B + beta * C
void Gemm::VecWriteWithAlphaBeta(int n, float *c, float *C, int ldc) {} void Gemm::VecWriteWithAlphaBeta(int n, float *c, float *C, int ldc) {}
// C = A * B + C // C = A * B + C
void Gemm::VecWriteWithAdd(int n, float *c, float *C, int ldc) { void Gemm::VecWriteWithAdd(int n, float *c, float *C, int ldc) {
int nc1 = n / 16; int nc1 = n / 16;
int _nc1 = n % 16; int _nc1 = n % 16;
...@@ -2657,18 +2657,18 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc, ...@@ -2657,18 +2657,18 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
: [C] "+r"(C), [c] "+r"(c) : [C] "+r"(C), [c] "+r"(c)
: [nc1] "r"(nc1) : [nc1] "r"(nc1)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q10", : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q10", "q11",
"q11", "q12", "q13"); "q12", "q13");
if (_nc1 != 0) { if (_nc1 != 0) {
for (int j = 0; j < _nc1; j++) { for (int j = 0; j < _nc1; j++) {
*C++ += *c++; *C++ += *c++;
} }
} }
} }
// C = A * B + C, relu(C) // C = A * B + C, relu(C)
void Gemm::VecWriteWithAddRelu(int n, float *c, float *C, int ldc) { void Gemm::VecWriteWithAddRelu(int n, float *c, float *C, int ldc) {
int nc1 = n / 16; int nc1 = n / 16;
int _nc1 = n % 16; int _nc1 = n % 16;
...@@ -2700,8 +2700,8 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc, ...@@ -2700,8 +2700,8 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
: [C] "+r"(C), [c] "+r"(c) : [C] "+r"(C), [c] "+r"(c)
: [nc1] "r"(nc1) : [nc1] "r"(nc1)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q10", : "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q10", "q11",
"q11", "q12", "q13"); "q12", "q13");
if (_nc1 != 0) { if (_nc1 != 0) {
for (int j = 0; j < _nc1; j++) { for (int j = 0; j < _nc1; j++) {
...@@ -2713,8 +2713,9 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc, ...@@ -2713,8 +2713,9 @@ void Gemm::WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
c++; c++;
} }
} }
} }
/*
// C = A * B, batchnorm(C) // C = A * B, batchnorm(C)
void Gemm::VecWriteWithBn(int n, float *c, float *C, int ldc, float *scale, void Gemm::VecWriteWithBn(int n, float *c, float *C, int ldc, float *scale,
float *bias) { float *bias) {
...@@ -3149,13 +3150,18 @@ void Gemm::SgemmWithPRelu(int m, int n, int k, const float *A, int lda, ...@@ -3149,13 +3150,18 @@ void Gemm::SgemmWithPRelu(int m, int n, int k, const float *A, int lda,
void Gemm::Sgemm_omp(int m, int n, int k, float alpha, const float *A, int lda, void Gemm::Sgemm_omp(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc, const float *B, int ldb, float beta, float *C, int ldc,
bool relu, float *bias) { bool relu, float *bias) {
if (m == 1 && bias == nullptr) {
return VectorKernel(m, n, k, alpha, A, lda, B, ldb, beta, C, ldc, relu);
}
#ifdef _OPENMP #ifdef _OPENMP
int max_threads = omp_get_max_threads(); int max_threads = omp_get_max_threads();
#else #else
int max_threads = 1; int max_threads = 1;
#endif #endif
int L1 = 64 / max_threads * 1024; // int L1 = 64 / max_threads * 1024;
int L = (max_threads > 2) ? 64 : 32;
int L1 = L / max_threads * 1024;
KC = k; KC = k;
zero = static_cast<float *>(paddle_mobile::memory::Alloc(sizeof(float) * KC)); zero = static_cast<float *>(paddle_mobile::memory::Alloc(sizeof(float) * KC));
memset(static_cast<void *>(zero), 0, sizeof(float) * KC); memset(static_cast<void *>(zero), 0, sizeof(float) * KC);
......
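Two behavioral changes in Sgemm_omp are easy to miss in the diff: single-row multiplications with no bias now short-circuit to the re-enabled VectorKernel, and the per-thread blocking parameter L1 is derived from the thread count instead of always splitting 64. A small worked example of that arithmetic (the thread counts are illustrative):

#include <cstdio>

int main() {
  int thread_counts[] = {1, 2, 4};
  for (int max_threads : thread_counts) {
    int L = (max_threads > 2) ? 64 : 32;
    int L1 = L / max_threads * 1024;
    // prints 32 KB for 1 thread, 16 KB for 2 threads, 16 KB for 4 threads
    std::printf("threads=%d -> L1=%d KB\n", max_threads, L1 / 1024);
  }
  return 0;
}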
...@@ -105,12 +105,11 @@ void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb, ...@@ -105,12 +105,11 @@ void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb,
float *c, float *C, int ldc, float *p, float *c, float *C, int ldc, float *p,
std::string mode, float *bias, float *bias1); std::string mode, float *bias, float *bias1);
/*
// vector-matrix multiplication (M = 1) // vector-matrix multiplication (M = 1)
void VectorKernel(int m, int n, int k, float alpha, const float *A, int lda, void VectorKernel(int m, int n, int k, float alpha, const float *A, int lda,
const float *B, int ldb, float beta, float *C, int ldc, const float *B, int ldb, float beta, float *C, int ldc,
bool relu); bool relu);
/*
void VectorKernelWithBn(int m, int n, int k, float alpha, const float *A, void VectorKernelWithBn(int m, int n, int k, float alpha, const float *A,
int lda, const float *B, int ldb, float beta, float int lda, const float *B, int ldb, float beta, float
*C, int ldc, bool relu, float *new_scale, float *new_bias); *C, int ldc, bool relu, float *new_scale, float *new_bias);
...@@ -149,7 +148,6 @@ void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb, ...@@ -149,7 +148,6 @@ void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb,
void WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc, void WriteWithBnAddRelu(int mc, int nc, float *c, float *C, int ldc,
float *new_scale, float *new_bias, float *bias1); float *new_scale, float *new_bias, float *bias1);
/*
// write back the vector-matrix multiplication result // write back the vector-matrix multiplication result
// C = A * B // C = A * B
void VecWriteBasic(int n, float *c, float *C, int ldc); void VecWriteBasic(int n, float *c, float *C, int ldc);
...@@ -159,12 +157,13 @@ void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb, ...@@ -159,12 +157,13 @@ void PackMatrixB(int k, int n, int n_tail, const float *B, int ldb,
void VecWriteWithAdd(int n, float *c, float *C, int ldc); void VecWriteWithAdd(int n, float *c, float *C, int ldc);
// C = A * B + C, relu(C) // C = A * B + C, relu(C)
void VecWriteWithAddRelu(int n, float *c, float *C, int ldc); void VecWriteWithAddRelu(int n, float *c, float *C, int ldc);
/*
// C = A * B, batchnorm(C) // C = A * B, batchnorm(C)
void VecWriteWithBn(int n, float *c, float *C, int ldc, float *new_scale, void VecWriteWithBn(int n, float *c, float *C, int ldc, float *new_scale,
float *new_bias); float *new_bias);
// C = A * B, batchnorm(C), relu(C) // C = A * B, batchnorm(C), relu(C)
void VecWriteWithBnRelu(int n, float *c, float *C, int ldc, float *new_scale, void VecWriteWithBnRelu(int n, float *c, float *C, int ldc, float
float *new_bias); *new_scale, float *new_bias);
*/ */
// 32-bit float matrix multiplication // 32-bit float matrix multiplication
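These header hunks un-comment VectorKernel (the M = 1 fast path used by the short-circuit above) while leaving the batchnorm write-back variants disabled. As a reference for what that path computes, a plain scalar version is sketched below; it assumes row-major storage with leading dimension ldb and is only an illustration, not the packed NEON implementation:

// Scalar reference for the M == 1 case: C = alpha * (A * B) + beta * C, with an
// optional ReLU, where A is a single row of length k. Illustrative only.
void VectorKernelRef(int n, int k, float alpha, const float *A, const float *B,
                     int ldb, float beta, float *C, bool relu) {
  for (int j = 0; j < n; ++j) {
    float acc = 0.f;
    for (int p = 0; p < k; ++p) {
      acc += A[p] * B[p * ldb + j];
    }
    float value = alpha * acc + beta * C[j];
    C[j] = (relu && value < 0.f) ? 0.f : value;
  }
}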
...@@ -392,7 +391,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, ...@@ -392,7 +391,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
packedB_int8 = static_cast<int8_t *>( packedB_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC)); paddle_mobile::memory::Alloc(sizeof(int8_t) * KC * NC));
#if __aarch64__ #if __aarch64__
// TODO() // TODO(paddle mobile)
#else #else
PackMatrixB_omp_2c_16(k, n, n % NR_INT8, B, ldb, packedB_int8); PackMatrixB_omp_2c_16(k, n, n % NR_INT8, B, ldb, packedB_int8);
#endif #endif
...@@ -414,7 +413,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, ...@@ -414,7 +413,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
packedA_int8 = static_cast<int8_t *>( packedA_int8 = static_cast<int8_t *>(
paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC)); paddle_mobile::memory::Alloc(sizeof(int8_t) * MC * KC));
#if __aarch64__ #if __aarch64__
// TODO() // TODO(paddle mobile)
#else #else
PackMatrixA_omp_4r_16(m, k, m % MR_INT8, A, lda, packedA_int8); PackMatrixA_omp_4r_16(m, k, m % MR_INT8, A, lda, packedA_int8);
#endif #endif
...@@ -438,7 +437,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, ...@@ -438,7 +437,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
int8_t *local_A = packedA_int8 + MC * KC * local_threads; int8_t *local_A = packedA_int8 + MC * KC * local_threads;
int32_t *local_C = packedC_int32 + MC * NC * local_threads; int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__ #if __aarch64__
// TODO() // TODO(paddle mobile)
#else #else
PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, local_A); PackMatrixA_4r_16(mc, k, mc % MR_INT8, &A(i, 0), lda, local_A);
#endif #endif
...@@ -468,7 +467,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha, ...@@ -468,7 +467,7 @@ void Gemm::Sgemm_omp(int32_t m, int32_t n, int32_t k, float alpha,
int8_t *local_B = packedB_int8 + KC * NC * local_threads; int8_t *local_B = packedB_int8 + KC * NC * local_threads;
int32_t *local_C = packedC_int32 + MC * NC * local_threads; int32_t *local_C = packedC_int32 + MC * NC * local_threads;
#if __aarch64__ #if __aarch64__
// TODO() // TODO(paddle mobile)
#else #else
PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, local_B); PackMatrixB_2c_16(k, nc, nc % NR_INT8, &B(0, j), ldb, local_B);
#endif #endif
......
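The int8 hunks above only reword the aarch64 TODO comments, but they make the threading layout visible: every OpenMP worker packs into its own slice of packedA_int8 / packedB_int8 / packedC_int32, offset by MC * KC, KC * NC and MC * NC elements. A hedged sketch of that addressing scheme (the struct, its allocation and the names are illustrative, not the library's):

#include <cstdint>
#include <vector>

// Per-thread packed workspaces: each worker owns a disjoint slice, so the
// packing calls above need no synchronization.
struct PackedWorkspaces {
  int MC, KC, NC;
  std::vector<int8_t> A, B;
  std::vector<int32_t> C;

  PackedWorkspaces(int mc, int kc, int nc, int nthreads)
      : MC(mc), KC(kc), NC(nc),
        A(static_cast<size_t>(mc) * kc * nthreads),
        B(static_cast<size_t>(kc) * nc * nthreads),
        C(static_cast<size_t>(mc) * nc * nthreads) {}

  int8_t *localA(int tid) { return A.data() + static_cast<size_t>(MC) * KC * tid; }
  int8_t *localB(int tid) { return B.data() + static_cast<size_t>(KC) * NC * tid; }
  int32_t *localC(int tid) { return C.data() + static_cast<size_t>(MC) * NC * tid; }
};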
...@@ -11,13 +11,14 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,13 +11,14 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#ifdef GRU_OP #ifdef GRU_OP
#include "operators/math/gru_compute.h" #include "operators/math/gru_compute.h"
#include "common/types.h" #include "common/types.h"
#include "operators/math/activation_functions.h" #include "operators/math/activation.h"
#include "operators/math/gemm.h" #include "operators/math/gemm.h"
#include "operators/math/gru_cpu_kernel.h" #include "operators/math/gru_cpu_kernel.h"
#include "operators/math/gru_kernel.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
...@@ -43,8 +44,7 @@ struct GRUUnitFunctor<CPU, T> { ...@@ -43,8 +44,7 @@ struct GRUUnitFunctor<CPU, T> {
#endif #endif
} }
forward_reset_output(forward::gru_resetOutput<T>(), value, frame_size, forward_reset_output(value, frame_size, batch_size, active_gate);
batch_size, active_gate);
if (value.prev_out_value) { if (value.prev_out_value) {
#ifdef _OPENMP #ifdef _OPENMP
...@@ -60,8 +60,7 @@ struct GRUUnitFunctor<CPU, T> { ...@@ -60,8 +60,7 @@ struct GRUUnitFunctor<CPU, T> {
#endif #endif
} }
forward_final_output(forward::gru_finalOutput<T>(), value, frame_size, forward_final_output(value, frame_size, batch_size, active_node);
batch_size, active_node);
} }
}; };
......
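With the functor objects removed, the two calls above reduce to the element-wise GRU stages below (illustrative notation: u is the update gate, r the reset gate, s the candidate state, h' the previous output; the gate activation is applied to both gates and the node activation to the candidate state):

\begin{aligned}
u &= \sigma_{\text{gate}}(u_{\text{in}}), \qquad r = \sigma_{\text{gate}}(r_{\text{in}}), \qquad o_{\text{reset}} = h' \odot r, \\
s &= \sigma_{\text{node}}(s_{\text{in}}), \qquad h = h' - u \odot h' + u \odot s = (1 - u) \odot h' + u \odot s.
\end{aligned}

The same arithmetic appears in the scalar tail of gru_cpu_kernel.h further down; the NEON loops there compute it eight lanes at a time.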
...@@ -11,7 +11,7 @@ limitations under the License. */ ...@@ -11,7 +11,7 @@ limitations under the License. */
#ifdef GRU_OP #ifdef GRU_OP
#pragma once #pragma once
#include "operators/math/activation_functions.h" #include "operators/math/activation.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
......
...@@ -11,21 +11,22 @@ distributed under the License is distributed on an "AS IS" BASIS, ...@@ -11,21 +11,22 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#ifdef GRU_OP #ifdef GRU_OP
#pragma once #pragma once
#include <type_traits> #include <type_traits>
#include "operators/math/activation_functions.h" #include "operators/math/activation.h"
#include "operators/math/gru_compute.h" #include "operators/math/gru_compute.h"
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
namespace math { namespace math {
template <class OpResetOutput, typename T> template <typename T, ActivationType Act>
void hl_naive_gru_forward_reset_output(OpResetOutput op_reset_output, void hl_naive_gru_forward_reset_output(T *gate_value, T *reset_output_value,
T *gate_value, T *reset_output_value, T *prev_output_value, int frame_size) {
T *prev_output_value, int frame_size,
ActivationType active_gate) {
T r_value_update_gate; T r_value_update_gate;
T r_value_reset_gate; T r_value_reset_gate;
T r_value_reset_output; T r_value_reset_output;
...@@ -33,27 +34,57 @@ void hl_naive_gru_forward_reset_output(OpResetOutput op_reset_output, ...@@ -33,27 +34,57 @@ void hl_naive_gru_forward_reset_output(OpResetOutput op_reset_output,
T *update_gate = gate_value; T *update_gate = gate_value;
T *reset_gate = gate_value + frame_size; T *reset_gate = gate_value + frame_size;
for (int i = 0; i < frame_size; i++) { int remain = frame_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
int loop = remain >> 3;
remain = remain & 0x7;
float32x4_t prev0 = vdupq_n_f32(0.f);
float32x4_t prev1 = vdupq_n_f32(0.f);
for (int i = 0; i < loop; ++i) {
float32x4_t update0 = vld1q_f32(update_gate);
float32x4_t update1 = vld1q_f32(update_gate + 4);
float32x4_t reset0 = vld1q_f32(reset_gate);
float32x4_t reset1 = vld1q_f32(reset_gate + 4);
if (prev_output_value) {
prev0 = vld1q_f32(prev_output_value);
prev1 = vld1q_f32(prev_output_value + 4);
prev_output_value += 8;
}
update0 = vActiveq_f32<Act>(update0);
update1 = vActiveq_f32<Act>(update1);
reset0 = vActiveq_f32<Act>(reset0);
reset1 = vActiveq_f32<Act>(reset1);
float32x4_t output0 = vmulq_f32(prev0, reset0);
float32x4_t output1 = vmulq_f32(prev1, reset1);
vst1q_f32(update_gate, update0);
vst1q_f32(update_gate + 4, update1);
vst1q_f32(reset_gate, reset0);
vst1q_f32(reset_gate + 4, reset1);
vst1q_f32(reset_output_value, output0);
vst1q_f32(reset_output_value + 4, output1);
update_gate += 8;
reset_gate += 8;
reset_output_value += 8;
}
#endif // __ARM_NEON__
for (int i = 0; i < remain; i++) {
r_value_update_gate = update_gate[i]; r_value_update_gate = update_gate[i];
r_value_reset_gate = reset_gate[i]; r_value_reset_gate = reset_gate[i];
if (prev_output_value) { if (prev_output_value) {
r_prev_out = prev_output_value[i]; r_prev_out = prev_output_value[i];
} }
r_value_update_gate = Active<Act>(r_value_update_gate);
op_reset_output(&r_value_update_gate, &r_value_reset_gate, &r_prev_out, r_value_reset_gate = Active<Act>(r_value_reset_gate);
&r_value_reset_output, active_gate); r_value_reset_output = r_prev_out * r_value_reset_gate;
update_gate[i] = r_value_update_gate; update_gate[i] = r_value_update_gate;
reset_gate[i] = r_value_reset_gate; reset_gate[i] = r_value_reset_gate;
reset_output_value[i] = r_value_reset_output; reset_output_value[i] = r_value_reset_output;
} }
} }
template <class OpFinalOutput, typename T> template <typename T, ActivationType Act>
void hl_naive_gru_forward_final_output(OpFinalOutput op_final_output, void hl_naive_gru_forward_final_output(T *gate_value, T *prev_output_value,
T *gate_value, T *prev_output_value, T *output_value, int frame_size) {
T *output_value, int frame_size,
ActivationType active_node) {
T r_value_update_gate; T r_value_update_gate;
T r_value_frame_state; T r_value_frame_state;
T r_prev_out = 0; T r_prev_out = 0;
...@@ -61,30 +92,73 @@ void hl_naive_gru_forward_final_output(OpFinalOutput op_final_output, ...@@ -61,30 +92,73 @@ void hl_naive_gru_forward_final_output(OpFinalOutput op_final_output,
T *update_gate = gate_value; T *update_gate = gate_value;
T *frame_state = gate_value + frame_size * 2; T *frame_state = gate_value + frame_size * 2;
for (int i = 0; i < frame_size; i++) { int remain = frame_size;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
int loop = remain >> 3;
remain = remain & 0x7;
float32x4_t prev0 = vdupq_n_f32(0.f);
float32x4_t prev1 = vdupq_n_f32(0.f);
for (int i = 0; i < loop; ++i) {
float32x4_t update0 = vld1q_f32(update_gate);
float32x4_t update1 = vld1q_f32(update_gate + 4);
float32x4_t state0 = vld1q_f32(frame_state);
float32x4_t state1 = vld1q_f32(frame_state + 4);
if (prev_output_value) {
prev0 = vld1q_f32(prev_output_value);
prev1 = vld1q_f32(prev_output_value + 4);
prev_output_value += 8;
}
state0 = vActiveq_f32<Act>(state0);
state1 = vActiveq_f32<Act>(state1);
float32x4_t output0 = vmlsq_f32(prev0, update0, prev0);
float32x4_t output1 = vmlsq_f32(prev1, update1, prev1);
output0 = vmlaq_f32(output0, update0, state0);
output1 = vmlaq_f32(output1, update1, state1);
vst1q_f32(frame_state, state0);
vst1q_f32(frame_state + 4, state1);
vst1q_f32(output_value, output0);
vst1q_f32(output_value + 4, output1);
update_gate += 8;
frame_state += 8;
output_value += 8;
}
#endif // __ARM_NEON__
for (int i = 0; i < remain; i++) {
r_value_update_gate = update_gate[i]; r_value_update_gate = update_gate[i];
r_value_frame_state = frame_state[i]; r_value_frame_state = frame_state[i];
if (prev_output_value) { if (prev_output_value) {
r_prev_out = prev_output_value[i]; r_prev_out = prev_output_value[i];
} }
r_value_frame_state = Active<Act>(r_value_frame_state);
op_final_output(&r_value_update_gate, &r_value_frame_state, &r_prev_out, r_output = r_prev_out - r_value_update_gate * r_prev_out +
&r_output, active_node); r_value_update_gate * r_value_frame_state;
frame_state[i] = r_value_frame_state; frame_state[i] = r_value_frame_state;
output_value[i] = r_output; output_value[i] = r_output;
} }
} }
template <class OpResetOutput, typename T> #define FORWARD_RESET_OUTPUT(active_type, value, frame_size) \
inline void forward_reset_output(OpResetOutput op_reset_output, hl_naive_gru_forward_reset_output<float, active_type>( \
GRUMetaValue<T> value, int frame_size, value.gate_value, value.reset_output_value, value.prev_out_value, \
int batch_size, ActivationType active_gate) { frame_size);
for (int b = 0; b < batch_size; b++) {
hl_naive_gru_forward_reset_output(
op_reset_output, value.gate_value, value.reset_output_value,
value.prev_out_value, frame_size, active_gate);
template <typename T>
inline void forward_reset_output(GRUMetaValue<T> value, int frame_size,
int batch_size, ActivationType active_node) {
for (int b = 0; b < batch_size; ++b) {
switch (active_node) {
case RELU:
FORWARD_RESET_OUTPUT(RELU, value, frame_size);
break;
case SIGMOID:
FORWARD_RESET_OUTPUT(SIGMOID, value, frame_size);
break;
case TANH:
FORWARD_RESET_OUTPUT(TANH, value, frame_size);
break;
default:
FORWARD_RESET_OUTPUT(IDENTITY, value, frame_size);
}
value.gate_value += frame_size * 3; value.gate_value += frame_size * 3;
value.reset_output_value += frame_size; value.reset_output_value += frame_size;
if (value.prev_out_value) { if (value.prev_out_value) {
...@@ -93,15 +167,27 @@ inline void forward_reset_output(OpResetOutput op_reset_output, ...@@ -93,15 +167,27 @@ inline void forward_reset_output(OpResetOutput op_reset_output,
} }
} }
template <class OpFinalOutput, typename T> #define FORWARD_FINAL_OUTPUT(active_type, value, frame_size) \
inline void forward_final_output(OpFinalOutput op_final_output, hl_naive_gru_forward_final_output<float, active_type>( \
GRUMetaValue<T> value, int frame_size, value.gate_value, value.prev_out_value, value.output_value, frame_size)
int batch_size, ActivationType active_node) {
for (int b = 0; b < batch_size; b++) {
hl_naive_gru_forward_final_output(op_final_output, value.gate_value,
value.prev_out_value, value.output_value,
frame_size, active_node);
template <typename T>
inline void forward_final_output(GRUMetaValue<T> value, int frame_size,
int batch_size, ActivationType active_node) {
for (int b = 0; b < batch_size; ++b) {
switch (active_node) {
case RELU:
FORWARD_FINAL_OUTPUT(RELU, value, frame_size);
break;
case SIGMOID:
FORWARD_FINAL_OUTPUT(SIGMOID, value, frame_size);
break;
case TANH:
FORWARD_FINAL_OUTPUT(TANH, value, frame_size);
break;
default:
FORWARD_FINAL_OUTPUT(IDENTITY, value, frame_size);
}
value.gate_value += frame_size * 3; value.gate_value += frame_size * 3;
value.output_value += frame_size; value.output_value += frame_size;
if (value.prev_out_value) { if (value.prev_out_value) {
...@@ -113,4 +199,5 @@ inline void forward_final_output(OpFinalOutput op_final_output, ...@@ -113,4 +199,5 @@ inline void forward_final_output(OpFinalOutput op_final_output,
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
#endif #endif
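The FORWARD_RESET_OUTPUT / FORWARD_FINAL_OUTPUT macros turn a runtime ActivationType into a compile-time template argument, so the per-element activation can be inlined (and vectorized) inside the hot loop instead of being dispatched per element. A self-contained toy version of the same pattern, with illustrative names rather than the library's:

#include <cmath>

enum ActivationType { IDENTITY, SIGMOID, RELU, TANH };

// Compile-time activation selection, analogous to Active<Act> in the hunk above.
template <ActivationType Act>
inline float Activate(float x) { return x; }  // IDENTITY
template <> inline float Activate<SIGMOID>(float x) { return 1.f / (1.f + std::exp(-x)); }
template <> inline float Activate<RELU>(float x) { return x > 0.f ? x : 0.f; }
template <> inline float Activate<TANH>(float x) { return std::tanh(x); }

template <ActivationType Act>
void ApplyActivation(float *data, int n) {
  for (int i = 0; i < n; ++i) data[i] = Activate<Act>(data[i]);
}

// Runtime switch, same shape as the macro dispatch in forward_reset_output.
void ApplyActivation(float *data, int n, ActivationType act) {
  switch (act) {
    case RELU:    ApplyActivation<RELU>(data, n); break;
    case SIGMOID: ApplyActivation<SIGMOID>(data, n); break;
    case TANH:    ApplyActivation<TANH>(data, n); break;
    default:      ApplyActivation<IDENTITY>(data, n); break;
  }
}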
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef GRU_OP
#pragma once
#include <type_traits>
#include "operators/math/activation_functions.h"
namespace paddle_mobile {
namespace operators {
namespace math {
namespace forward {
template <typename T>
class gru_resetOutput {
public:
void operator()(T *value_update_gate, T *value_reset_gate, T *prev_out,
T *value_reset_output, ActivationType act_gate) {
*value_update_gate = activation(*value_update_gate, act_gate);
*value_reset_gate = activation(*value_reset_gate, act_gate);
*value_reset_output = (*prev_out) * (*value_reset_gate);
}
};
template <typename T>
class gru_finalOutput {
public:
void operator()(T *value_update_gate, T *value_frame_state, T *prev_out,
T *value_output, ActivationType act_input) {
*value_frame_state = activation(*value_frame_state, act_input);
*value_output = *prev_out - ((*value_update_gate) * (*prev_out)) +
((*value_update_gate) * (*value_frame_state));
}
};
} // namespace forward
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif
...@@ -41,10 +41,10 @@ void set_constant(framework::Tensor *tensor, float value) { ...@@ -41,10 +41,10 @@ void set_constant(framework::Tensor *tensor, float value) {
} }
template <> template <>
void matmul<float>(const framework::Tensor &matrix_a, bool trans_a, void matmul<float, float>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, float alpha, const framework::Tensor &matrix_b, bool trans_b,
framework::Tensor *matrix_out, float beta, bool relu, float alpha, framework::Tensor *matrix_out,
float *bias) { float beta, bool relu, float *bias) {
auto dim_a = matrix_a.dims(); auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims(); auto dim_b = matrix_b.dims();
auto dim_out = matrix_out->dims(); auto dim_out = matrix_out->dims();
......
...@@ -24,24 +24,24 @@ namespace math { ...@@ -24,24 +24,24 @@ namespace math {
void set_constant(framework::Tensor *tensor, float value); void set_constant(framework::Tensor *tensor, float value);
template <typename T> template <typename Itype, typename Otype>
void matmul(const framework::Tensor &matrix_a, bool trans_a, void matmul(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, T alpha, const framework::Tensor &matrix_b, bool trans_b, float alpha,
framework::Tensor *matrix_out, T beta, bool relu = false, framework::Tensor *matrix_out, float beta, bool relu = false,
float *bias = nullptr); Otype *bias = nullptr);
template <typename T, typename S> template <typename Itype, typename Otype>
void matmul(const framework::Tensor &matrix_a, bool trans_a, void matmul(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, T alpha, const framework::Tensor &matrix_b, bool trans_b, float alpha,
framework::Tensor *matrix_out, T beta, bool relu = false, framework::Tensor *matrix_out, float beta, bool relu, Otype *bias,
S *bias = nullptr, bool addOnRow = false); bool addOnRow);
template <typename T> template <typename T>
void matmulWithBn(const framework::Tensor &matrix_a, bool trans_a, void matmulWithBn(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, T alpha, const framework::Tensor &matrix_b, bool trans_b, float alpha,
framework::Tensor *matrix_out, T beta, bool relu, framework::Tensor *matrix_out, float beta, bool relu,
framework::Tensor *new_scale, framework::Tensor *new_bias, framework::Tensor *new_scale, framework::Tensor *new_bias,
int group, float *bias = nullptr); int group, T *bias = nullptr);
void matmulWithPRelu(const framework::Tensor &matrix_a, bool trans_a, void matmulWithPRelu(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, const framework::Tensor &matrix_b, bool trans_b,
......
...@@ -22,9 +22,10 @@ namespace operators { ...@@ -22,9 +22,10 @@ namespace operators {
namespace math { namespace math {
template <> template <>
void matmul(const framework::Tensor &matrix_a, bool trans_a, void matmul<int8_t, int32_t>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b, float alpha, const framework::Tensor &matrix_b, bool trans_b,
framework::Tensor *matrix_out, float beta, bool relu, int32_t *bias, float alpha, framework::Tensor *matrix_out,
float beta, bool relu, int32_t *bias,
bool addOnRow) { bool addOnRow) {
auto dim_a = matrix_a.dims(); auto dim_a = matrix_a.dims();
auto dim_b = matrix_b.dims(); auto dim_b = matrix_b.dims();
...@@ -93,6 +94,16 @@ void matmul(const framework::Tensor &matrix_a, bool trans_a, ...@@ -93,6 +94,16 @@ void matmul(const framework::Tensor &matrix_a, bool trans_a,
#endif #endif
} }
} }
template <>
void matmul<int8_t, int32_t>(const framework::Tensor &matrix_a, bool trans_a,
const framework::Tensor &matrix_b, bool trans_b,
float alpha, framework::Tensor *matrix_out,
float beta, bool relu, int32_t *bias) {
matmul<int8_t, int32_t>(matrix_a, trans_a, matrix_b, trans_b, alpha,
matrix_out, beta, relu, bias, false);
}
} // namespace math } // namespace math
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
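The new overload added here simply forwards to the addOnRow version with addOnRow = false, so existing int8 call sites compile unchanged. A hypothetical call site is sketched below; it relies only on APIs visible in this diff (Resize, mutable_data, the matmul<int8_t, int32_t> signatures), while the header paths, shapes and omitted data filling are assumptions:

#include "framework/tensor.h"
#include "operators/math/math_function.h"  // assumed header for the declarations above

void ExampleInt8Matmul() {
  const int m = 4, k = 8, n = 6;
  paddle_mobile::framework::Tensor a, b, c;
  a.Resize({m, k});
  b.Resize({k, n});
  c.Resize({m, n});
  a.mutable_data<int8_t>();   // quantized inputs would be written here
  b.mutable_data<int8_t>();
  c.mutable_data<int32_t>();

  using namespace paddle_mobile::operators;
  // Explicit addOnRow form:
  math::matmul<int8_t, int32_t>(a, false, b, false, 1.f, &c, 0.f,
                                /*relu=*/false, /*bias=*/nullptr,
                                /*addOnRow=*/false);
  // Equivalent call through the forwarding overload added above:
  math::matmul<int8_t, int32_t>(a, false, b, false, 1.f, &c, 0.f,
                                /*relu=*/false, /*bias=*/nullptr);
}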
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#include "operators/math/pool_2x2.h"
#include <algorithm>
#include <vector>
namespace paddle_mobile {
namespace operators {
namespace math {
#define FLT_MAX __FLT_MAX__
void Pool2x2Maxs2p0(vector<int> strides, vector<int> paddings,
const Tensor *input, Tensor *output) {
const int batch_size = input->dims()[0];
const int input_height = input->dims()[2];
const int input_width = input->dims()[3];
const int output_channels = output->dims()[1];
int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = 2;
const int ksize_width = 2;
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int input_channel_stride = input_height * input_width;
const int output_channel_stride = output_height * output_width;
const int input_batch_stride = output_channels * input_channel_stride;
const int output_batch_stride = output_channels * output_channel_stride;
const float *input_data = input->data<float>();
float *output_data = output->mutable_data<float>();
int w1 = input_width / 16;
int _w1 = input_width % 16;
int w2 = _w1 / 4;
int _w2 = _w1 % 4;
for (int i = 0; i < batch_size; ++i) {
for (int c = 0; c < output_channels; ++c) {
for (int ph = 0; ph < input_height; ph += 2) {
const float *in_ptr1 = input_data + i * input_batch_stride +
c * input_channel_stride + ph * input_width;
const float *in_ptr2 = in_ptr1 + input_width;
        if (ph + 1 >= input_height) {
          // Pad the missing bottom row with -FLT_MAX so it never wins the max
          // (memset writes a byte pattern and cannot store -FLT_MAX floats).
          float *pad_row = static_cast<float *>(
              paddle_mobile::memory::Alloc(sizeof(float) * input_width));
          std::fill_n(pad_row, input_width, -FLT_MAX);
          in_ptr2 = pad_row;
}
float *out_ptr = output_data + i * output_batch_stride +
c * output_channel_stride + ph / 2 * output_width;
#if __ARM_NEON
#if __aarch64__
#else
asm volatile(
"subs %[w1], %[w1], #1 \n\t"
"blt end_w1_%= \n\t"
"loop_w1_%=: \n\t"
"pld [%[in_ptr1], #64] \n\t"
"pld [%[in_ptr2], #64] \n\t"
"vld1.f32 {q0, q1}, [%[in_ptr1]]! \n\t"
"vld1.f32 {q2, q3}, [%[in_ptr2]]! \n\t"
"vld1.f32 {q6, q7}, [%[in_ptr1]]! \n\t"
"vld1.f32 {q8, q9}, [%[in_ptr2]]! \n\t"
"vmax.f32 q0, q0, q2 \n\t"
"vmax.f32 q1, q1, q3 \n\t"
"vmax.f32 q6, q6, q8 \n\t"
"vmax.f32 q7, q7, q9 \n\t"
"vpmax.f32 d8, d0, d1 \n\t"
"vpmax.f32 d9, d2, d3 \n\t"
"vpmax.f32 d10, d12, d13 \n\t"
"vpmax.f32 d11, d14, d15 \n\t"
"vst1.32 {q4, q5}, [%[out_ptr]]! \n\t"
"subs %[w1], %[w1], #1 \n\t"
"bge loop_w1_%= \n\t"
"end_w1_%=: \n\t"
"subs %[w2], %[w2], #1 \n\t"
"blt end_w2_%= \n\t"
"loop_w2_%=: \n\t"
"vld1.f32 {q0}, [%[in_ptr1]]! \n\t"
"vld1.f32 {q1}, [%[in_ptr2]]! \n\t"
"vmax.f32 q0, q0, q1 \n\t"
"vpmax.f32 d4, d0, d1 \n\t"
"vst1.32 {d4}, [%[out_ptr]]! \n\t"
"subs %[w2], %[w2], #1 \n\t"
"bge loop_w2_%= \n\t"
"end_w2_%=: \n\t"
:
: [w1] "r"(w1), [w2] "r"(w2), [in_ptr1] "r"(in_ptr1),
[in_ptr2] "r"(in_ptr2), [out_ptr] "r"(out_ptr)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8",
"q9");
#endif
#endif
if (_w2 != 0) {
in_ptr1 = input_data + i * input_batch_stride +
c * input_channel_stride + ph * input_width + 16 * w1 +
4 * w2;
in_ptr2 = in_ptr1 + input_width;
out_ptr = output_data + i * output_batch_stride +
c * output_channel_stride + ph / 2 * output_width + 8 * w1 +
2 * w2;
if (_w2 == 1) {
*out_ptr = (*in_ptr1 > *in_ptr2) ? *in_ptr1 : *in_ptr2;
} else if (_w2 == 2) {
float temp = (*in_ptr1 > *in_ptr2) ? *in_ptr1 : *in_ptr2;
in_ptr1++;
in_ptr2++;
float temp1 = (*in_ptr1 > *in_ptr2) ? *in_ptr1 : *in_ptr2;
*out_ptr = (temp > temp1) ? temp : temp1;
} else if (_w2 == 3) {
float temp = (*in_ptr1 > *in_ptr2) ? *in_ptr1 : *in_ptr2;
in_ptr1++;
in_ptr2++;
float temp1 = (*in_ptr1 > *in_ptr2) ? *in_ptr1 : *in_ptr2;
in_ptr1++;
in_ptr2++;
*out_ptr = (temp > temp1) ? temp : temp1;
out_ptr++;
*out_ptr = (*in_ptr1 > *in_ptr2) ? *in_ptr1 : *in_ptr2;
}
}
}
}
}
}
void Pool2x2Avgs2p0(vector<int> strides, vector<int> paddings,
const Tensor *input, Tensor *output) {
const int batch_size = input->dims()[0];
const int input_height = input->dims()[2];
const int input_width = input->dims()[3];
const int output_channels = output->dims()[1];
int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int ksize_height = 2;
const int ksize_width = 2;
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int input_channel_stride = input_height * input_width;
const int output_channel_stride = output_height * output_width;
const int input_batch_stride = output_channels * input_channel_stride;
const int output_batch_stride = output_channels * output_channel_stride;
const float *input_data = input->data<float>();
float *output_data = output->mutable_data<float>();
int w1 = input_width / 16;
int _w1 = input_width % 16;
int w2 = _w1 / 4;
int _w2 = _w1 % 4;
float quarter = 0.25;
for (int i = 0; i < batch_size; ++i) {
for (int c = 0; c < output_channels; ++c) {
for (int ph = 0; ph < input_height; ph += 2) {
const float *in_ptr1 = input_data + i * input_batch_stride +
c * input_channel_stride + ph * input_width;
const float *in_ptr2 = in_ptr1 + input_width;
if (ph + 1 >= input_height) {
in_ptr2 = static_cast<float *>(
paddle_mobile::memory::Alloc(sizeof(float) * input_width));
memset(static_cast<void *>(const_cast<float *>(in_ptr2)), 0,
sizeof(float) * input_width);
}
float *out_ptr = output_data + i * output_batch_stride +
c * output_channel_stride + ph / 2 * output_width;
#if __ARM_NEON
#if __aarch64__
#else
asm volatile(
"subs %[w1], %[w1], #1 \n\t"
"blt end_w1_%= \n\t"
"loop_w1_%=: \n\t"
"pld [%[in_ptr1], #64] \n\t"
"pld [%[in_ptr2], #64] \n\t"
"vmov.f32 d0[0], %[quarter] \n\t"
"vld1.f32 {q1, q2}, [%[in_ptr1]]! \n\t"
"vld1.f32 {q3, q4}, [%[in_ptr2]]! \n\t"
"vld1.f32 {q7, q8}, [%[in_ptr1]]! \n\t"
"vld1.f32 {q9, q10}, [%[in_ptr2]]! \n\t"
"vadd.f32 q1, q1, q3 \n\t"
"vadd.f32 q2, q2, q4 \n\t"
"vadd.f32 q7, q7, q9 \n\t"
"vadd.f32 q8, q8, q10 \n\t"
"vpadd.f32 d10, d2, d3 \n\t"
"vpadd.f32 d11, d4, d5 \n\t"
"vpadd.f32 d12, d14, d15 \n\t"
"vpadd.f32 d13, d16, d17 \n\t"
"vmul.f32 q5, q5, d0[0] \n\t"
"vmul.f32 q6, q6, d0[0] \n\t"
"vst1.32 {q5, q6}, [%[out_ptr]]! \n\t"
"subs %[w1], %[w1], #1 \n\t"
"bge loop_w1_%= \n\t"
"end_w1_%=: \n\t"
"subs %[w2], %[w2], #1 \n\t"
"blt end_w2_%= \n\t"
"loop_w2_%=: \n\t"
"vld1.f32 {q1}, [%[in_ptr1]]! \n\t"
"vld1.f32 {q2}, [%[in_ptr2]]! \n\t"
"vadd.f32 q1, q1, q2 \n\t"
"vpadd.f32 d4, d2, d3 \n\t"
"vmul.f32 d4, d4, d0[0] \n\t"
"vst1.32 {d4}, [%[out_ptr]]! \n\t"
"subs %[w2], %[w2], #1 \n\t"
"bge loop_w2_%= \n\t"
"end_w2_%=: \n\t"
:
: [w1] "r"(w1), [w2] "r"(w2), [in_ptr1] "r"(in_ptr1),
[in_ptr2] "r"(in_ptr2), [out_ptr] "r"(out_ptr),
[quarter] "r"(quarter)
: "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8",
"q9", "q10");
#endif
#endif
if (_w2 != 0) {
in_ptr1 = input_data + i * input_batch_stride +
c * input_channel_stride + ph * input_width + 16 * w1 +
4 * w2;
in_ptr2 = in_ptr1 + input_width;
out_ptr = output_data + i * output_batch_stride +
c * output_channel_stride + ph / 2 * output_width + 8 * w1 +
2 * w2;
if (_w2 == 1) {
*out_ptr = 0.5 * (*in_ptr1 + *in_ptr2);
} else if (_w2 == 2) {
float temp = 0;
temp += *in_ptr1;
temp += *in_ptr2;
in_ptr1++;
in_ptr2++;
temp += *in_ptr1;
temp += *in_ptr2;
*out_ptr = 0.25 * temp;
} else if (_w2 == 3) {
float temp = 0;
temp += *in_ptr1++;
temp += *in_ptr2++;
temp += *in_ptr1++;
temp += *in_ptr2++;
*out_ptr = 0.25 * temp;
out_ptr++;
*out_ptr = 0.5 * (*in_ptr1 + *in_ptr2);
}
}
}
}
}
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif
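Pool2x2Maxs2p0 / Pool2x2Avgs2p0 carve each input row into blocks of 16 columns (the q-register loop, producing 8 outputs), blocks of 4 (producing 2 outputs) and a scalar tail of _w2 leftover columns, with a synthetic bottom row when the height is odd. For comparison, a plain scalar reference of the same 2x2, stride-2 max pooling is sketched below; it assumes even input height and width and no padding, and is not the NEON implementation:

#include <algorithm>

// Scalar reference: 2x2 max pooling, stride 2, no padding, even H and W.
void Pool2x2Maxs2p0Ref(const float *in, float *out, int channels, int in_h,
                       int in_w) {
  const int out_h = in_h / 2;
  const int out_w = in_w / 2;
  for (int c = 0; c < channels; ++c) {
    const float *src = in + c * in_h * in_w;
    float *dst = out + c * out_h * out_w;
    for (int oh = 0; oh < out_h; ++oh) {
      for (int ow = 0; ow < out_w; ++ow) {
        const float *p = src + 2 * oh * in_w + 2 * ow;
        dst[oh * out_w + ow] =
            std::max(std::max(p[0], p[1]), std::max(p[in_w], p[in_w + 1]));
      }
    }
  }
}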
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#pragma once
#include "framework/tensor.h"
#ifdef __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
namespace paddle_mobile {
namespace operators {
namespace math {
using framework::Tensor;
using std::vector;
void Pool2x2Maxs2p0(vector<int> strides, vector<int> paddings,
const Tensor *input, Tensor *output);
void Pool2x2Avgs2p0(vector<int> strides, vector<int> paddings,
const Tensor *in_x, Tensor *out);
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#ifdef _OPENMP
#include <omp.h>
#endif
#include "framework/tensor.h"
#include "operators/math/pool_3x3.h"
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
#include <climits>
namespace paddle_mobile {
namespace operators {
namespace math {
using framework::Tensor;
using std::max;
using std::min;
using std::vector;
void Pool3x3Avgs1p1(const Tensor *input, Tensor *output) {
#if __ARM_NEON
const int batch_size = static_cast<int>(input->dims()[0]);
const int input_channel = static_cast<int>(input->dims()[1]);
const int input_height = static_cast<int>(input->dims()[2]);
const int input_width = static_cast<int>(input->dims()[3]);
const int output_height = static_cast<int>(output->dims()[2]);
const int output_width = static_cast<int>(output->dims()[3]);
output->mutable_data<float>();
const int hxw = input_height * input_width;
const int l = input_height;
const float coef = 1.0 / 9.0;
const float coef1 = 1.0 / 6.0;
const float coef2 = 1.0 / 4.0;
float32x4_t v_coef = vdupq_n_f32(coef);
float32x4_t v_coef1 = vdupq_n_f32(coef1);
for (int b = 0; b < batch_size; b++) {
#pragma omp parallel for
for (int c = 0; c < input_channel; c++) {
const float *input_data = input->data<float>() + c * hxw;
float *output_data = output->data<float>() + c * hxw;
for (int i = 1; i < output_height - 1; i++) {
float *output_ptr;
float32x4_t in0, in1, in2, in3, in4, in5, tmp0, tmp1, tmp2, tmp3, tmp4,
tmp5, out0;
for (int m = 1; m < output_width - 4; m += 4) {
output_ptr = output_data + i * output_width + m;
in0 = vld1q_f32(input_data + (i - 1) * input_width + m - 1);
in1 = vld1q_f32(input_data + (i - 1) * input_width + m + 3);
in2 = vld1q_f32(input_data + i * input_width + m - 1);
in3 = vld1q_f32(input_data + i * input_width + m + 3);
in4 = vld1q_f32(input_data + (i + 1) * input_width + m - 1);
in5 = vld1q_f32(input_data + (i + 1) * input_width + m + 3);
tmp0 = vextq_f32(in0, in1, 1);
tmp1 = vextq_f32(in0, in1, 2);
tmp2 = vextq_f32(in2, in3, 1);
tmp3 = vextq_f32(in2, in3, 2);
tmp4 = vextq_f32(in4, in5, 1);
tmp5 = vextq_f32(in4, in5, 2);
out0 = in0;
out0 = vaddq_f32(out0, tmp0);
out0 = vaddq_f32(out0, tmp1);
out0 = vaddq_f32(out0, in2);
out0 = vaddq_f32(out0, tmp2);
out0 = vaddq_f32(out0, tmp3);
out0 = vaddq_f32(out0, in4);
out0 = vaddq_f32(out0, tmp4);
out0 = vaddq_f32(out0, tmp5);
vst1q_f32(output_ptr, vmulq_f32(out0, v_coef));
}
int m;
for (m = 1; (m + 3) < output_width - 1; m = m + 4) {
}
for (int j = m; j < output_width - 1; j++) {
output_data[i * output_width + j] =
input_data[(i - 1) * input_width + j - 1] +
input_data[(i - 1) * input_width + j] +
input_data[(i - 1) * input_width + j + 1] +
input_data[(i)*input_width + j - 1] +
input_data[(i)*input_width + j] +
input_data[(i)*input_width + j + 1] +
input_data[(i + 1) * input_width + j - 1] +
input_data[(i + 1) * input_width + j] +
input_data[(i + 1) * input_width + j + 1];
output_data[i * output_width + j] =
output_data[i * output_width + j] * coef;
}
}
output_data[0] =
input_data[0] + input_data[1] + input_data[l] + input_data[l + 1];
output_data[l - 1] = input_data[l - 2] + input_data[l - 1] +
input_data[2 * l - 2] + input_data[2 * l - 1];
output_data[(l - 1) * l] =
input_data[(l - 2) * l] + input_data[(l - 2) * l + 1] +
input_data[(l - 1) * l] + input_data[(l - 1) * l + 1];
output_data[l * l - 1] = input_data[(l - 2) * (l + 1)] +
input_data[(l - 2) * (l + 1) + 1] +
input_data[l * l - 2] + input_data[l * l - 1];
output_data[0] = output_data[0] * coef2;
output_data[l - 1] = output_data[l - 1] * coef2;
output_data[(l - 1) * l] = output_data[(l - 1) * l] * coef2;
output_data[l * l - 1] = output_data[l * l - 1] * coef2;
for (int i = 1; i < l - 1; ++i) {
output_data[i * l] = input_data[i * l - l] + input_data[i * l - l + 1] +
input_data[i * l] + input_data[i * l + 1] +
input_data[i * l + l] + input_data[i * l + l + 1];
output_data[i * l + l - 1] =
input_data[i * l + l - 1 - l - 1] + input_data[i * l + l - 1 - l] +
input_data[i * l + l - 1 - 1] + input_data[i * l + l - 1] +
input_data[i * l + l - 1 + l - 1] + input_data[i * l + l - 1 + l];
output_data[i * l] = output_data[i * l] * coef1;
output_data[i * l + l - 1] = output_data[i * l + l - 1] * coef1;
}
int m;
for (m = 1; m < output_width - 4; m += 4) {
float *output_ptr = output_data + m;
float32x4_t in0, in1, in2, in3, tmp0, tmp1, tmp2, tmp3, out0;
in0 = vld1q_f32(input_data + m - 1);
in1 = vld1q_f32(input_data + m + 3);
in2 = vld1q_f32(input_data + input_width + m - 1);
in3 = vld1q_f32(input_data + input_width + m + 3);
tmp0 = vextq_f32(in0, in1, 1);
tmp1 = vextq_f32(in0, in1, 2);
tmp2 = vextq_f32(in2, in3, 1);
tmp3 = vextq_f32(in2, in3, 2);
out0 = in0;
out0 = vaddq_f32(out0, tmp0);
out0 = vaddq_f32(out0, tmp1);
out0 = vaddq_f32(out0, in2);
out0 = vaddq_f32(out0, tmp2);
out0 = vaddq_f32(out0, tmp3);
vst1q_f32(output_ptr, vmulq_f32(out0, v_coef1));
}
for (m = 1; (m + 3) < output_width - 1; m += 4) {
}
for (int j = m; j < output_width - 1; j++) {
output_data[j] = input_data[j - 1] + input_data[j] + input_data[j + 1] +
input_data[input_width + j - 1] +
input_data[input_width + j] +
input_data[input_width + j + 1];
output_data[j] = output_data[j] * coef1;
}
for (m = 1; m < output_width - 4; m += 4) {
float *output_ptr =
output_data + (output_height - 1) * output_width + m;
float32x4_t in0, in1, in2, in3, tmp0, tmp1, tmp2, tmp3, out0;
in0 = vld1q_f32(input_data + (output_height - 2) * input_width + m - 1);
in1 = vld1q_f32(input_data + (output_height - 2) * input_width + m + 3);
in2 = vld1q_f32(input_data + (output_height - 1) * input_width + m - 1);
in3 = vld1q_f32(input_data + (output_height - 1) * input_width + m + 3);
tmp0 = vextq_f32(in0, in1, 1);
tmp1 = vextq_f32(in0, in1, 2);
tmp2 = vextq_f32(in2, in3, 1);
tmp3 = vextq_f32(in2, in3, 2);
out0 = in0;
out0 = vaddq_f32(out0, tmp0);
out0 = vaddq_f32(out0, tmp1);
out0 = vaddq_f32(out0, in2);
out0 = vaddq_f32(out0, tmp2);
out0 = vaddq_f32(out0, tmp3);
vst1q_f32(output_ptr, vmulq_f32(out0, v_coef1));
}
for (m = 1; (m + 3) < output_width - 1; m = m + 4) {
}
for (int j = m; j < output_width - 1; j++) {
output_data[(output_height - 1) * input_width + j] =
input_data[(output_height - 2) * input_width + j - 1] +
input_data[(output_height - 2) * input_width + j] +
input_data[(output_height - 2) * input_width + j + 1] +
input_data[(output_height - 1) * input_width + j - 1] +
input_data[(output_height - 1) * input_width + j] +
input_data[(output_height - 1) * input_width + j + 1];
output_data[(output_height - 1) * output_width + j] =
output_data[(output_height - 1) * output_width + j] * coef1;
}
}
}
// const int batch_size = input->dims()[0];
//
// const int h_in = input->dims()[2];
//
// const int w_in = input->dims()[3];
//
// const int output_channels = output->dims()[1];
//
// const int h_out = output->dims()[2];
// const int w_out = output->dims()[3];
// const int outputdata_channel_stride = h_out * w_out;
// const int inputdata_channel_stride = h_in * w_in;
// const int input_batch_stride = output_channels * inputdata_channel_stride;
// const int output_batch_stride = output_channels *
// outputdata_channel_stride; float *out_data = output->data<float>(); const
// float *input_data = input->data<float>();
//
// const float coef = 1.0 / 9.0;
// for (int k = 0; k < batch_size; ++k) {
// #pragma omp parallel for
// for (int c = 0; c < output_channels; ++c) {
// const float *input_seg = input_data + c * inputdata_channel_stride;
// float *output_seg = out_data + c * outputdata_channel_stride;
// // four corner point
// output_seg[0] = (input_seg[0] + input_seg[1] + input_seg[w_in] +
// input_seg[w_in + 1]) *
// coef;
// output_seg[w_out - 1] =
// (input_seg[w_in - 2] + input_seg[w_in - 1] + input_seg[w_in * 2 -
// 2] +
// input_seg[2 * w_in - 1]) *
// coef;
// output_seg[(h_out - 1) * w_out] =
// (input_seg[(h_in - 2) * w_in] + input_seg[(h_in - 2) * w_in + 1] +
// input_seg[(h_in - 1) * w_in] + input_seg[(h_in - 1) * w_in + 1])
// *
// coef;
// output_seg[h_out * w_out - 1] =
// (input_seg[h_in * w_in - 1] + input_seg[h_in * w_in - 2] +
// input_seg[(h_in - 1) * w_in - 1] +
// input_seg[(h_in - 1) * w_in - 2]) *
// coef;
// // left side & right side
// for (int i = 1; i < h_in - 1; ++i) {
// output_seg[i * w_out] =
// (input_seg[i * w_in - w_in] + input_seg[i * w_in - w_in + 1] +
// input_seg[i * w_in] + input_seg[i * w_in + 1] +
// input_seg[i * w_in + w_in] + input_seg[i * w_in + w_in + 1]) *
// coef;
// output_seg[i * w_out + w_out - 1] =
// (input_seg[i * w_in - w_in + w_in - 2] +
// input_seg[i * w_in - w_in + 1 + w_in - 2] +
// input_seg[i * w_in + w_in - 2] +
// input_seg[i * w_in + 1 + w_in - 2] +
// input_seg[i * w_in + w_in + w_in - 2] +
// input_seg[i * w_in + w_in + 1 + w_in - 2]) *
// coef;
// }
// // top 1 row & bottom 1 row
// const float *input_tmp = input_seg;
//
// float32x4_t in0, in1, in2, in3, in4, in5, in6, in7, tmp0, tmp1, tmp2,
// tmp3, tmp4, tmp5, sum, out0;
// float32x4_t v_coef = vdupq_n_f32(coef);
// in0 = vld1q_f32(input_tmp);
// in2 = vld1q_f32(input_tmp + w_in);
// const float *input_tmp_end = input_tmp + (h_in - 2) * w_in;
// in4 = vld1q_f32(input_tmp_end);
// in6 = vld1q_f32(input_tmp_end + w_in);
// int c_mid = w_out - 2;
// auto output_ptr = output_seg + 1;
// for (; c_mid > 3; c_mid -= 4) {
// in1 = vld1q_f32(input_tmp + 4);
// in3 = vld1q_f32(input_tmp + w_in + 4);
//
// tmp0 = vextq_f32(in0, in1, 1);
// tmp1 = vextq_f32(in0, in1, 2);
//
// tmp2 = vextq_f32(in2, in3, 1);
// tmp3 = vextq_f32(in2, in3, 2);
//
// sum = vaddq_f32(in0, tmp0);
// sum = vaddq_f32(sum, tmp1);
// sum = vaddq_f32(sum, in2);
// sum = vaddq_f32(sum, tmp2);
// sum = vaddq_f32(sum, tmp3);
//
// vst1q_f32(output_ptr, vmulq_f32(sum, v_coef));
//
// in5 = vld1q_f32(input_tmp_end + 4);
// in7 = vld1q_f32(input_tmp_end + w_in + 4);
//
// tmp0 = vextq_f32(in4, in5, 1);
// tmp1 = vextq_f32(in4, in5, 2);
// tmp2 = vextq_f32(in6, in7, 1);
// tmp3 = vextq_f32(in6, in7, 2);
//
// sum = vaddq_f32(in0, tmp0);
// sum = vaddq_f32(sum, tmp1);
// sum = vaddq_f32(sum, in2);
// sum = vaddq_f32(sum, tmp2);
// sum = vaddq_f32(sum, tmp3);
//
// vst1q_f32(output_ptr + (h_out - 1) * w_out, vmulq_f32(sum, v_coef));
//
// // can optimize to each 8 stride.
// input_tmp += 4;
// input_tmp_end += 4;
// output_ptr += 4;
// in0 = in1;
// in2 = in3;
// in4 = in5;
// in6 = in7;
// }
// // top right remain
// float32x4_t pad0 = vdupq_n_f32(input_seg[w_in - 1]);
// float32x4_t pad1 = vdupq_n_f32(input_seg[2 * w_in - 1]);
//
// tmp0 = vextq_f32(in0, pad0, 1);
// tmp1 = vextq_f32(in0, pad0, 2);
// tmp2 = vextq_f32(in2, pad1, 2);
// tmp3 = vextq_f32(in2, pad1, 2);
//
// sum = vaddq_f32(in0, tmp0);
// sum = vaddq_f32(sum, tmp1);
// sum = vaddq_f32(sum, in2);
// sum = vaddq_f32(sum, tmp2);
// sum = vaddq_f32(sum, tmp3);
// out0 = vmulq_f32(sum, v_coef);
//
// for (int i = 0; i < c_mid; ++i) {
// if (i == 0) {
// vst1q_lane_f32(output_ptr + i, out0, 0);
// }
// if (i == 1) {
// vst1q_lane_f32(output_ptr + i, out0, 1);
// }
// if (i == 2) {
// vst1q_lane_f32(output_ptr + i, out0, 2);
// }
// }
//
// // bottom_right remain
// float32x4_t pad2 = vdupq_n_f32(input_seg[(h_in - 1) * w_in - 1]);
// float32x4_t pad3 = vdupq_n_f32(input_seg[h_in * w_in - 1]);
//
// tmp0 = vextq_f32(in4, pad2, 1);
// tmp1 = vextq_f32(in4, pad2, 2);
// tmp2 = vextq_f32(in6, pad3, 2);
// tmp3 = vextq_f32(in6, pad3, 2);
//
// sum = vaddq_f32(in4, tmp0);
// sum = vaddq_f32(sum, tmp1);
// sum = vaddq_f32(sum, in6);
// sum = vaddq_f32(sum, tmp2);
// sum = vaddq_f32(sum, tmp3);
// out0 = vmulq_f32(sum, v_coef);
//
// for (int i = 0; i < c_mid; ++i) {
// if (i == 0) {
// vst1q_lane_f32(output_ptr + (h_out - 1) * w_out + i, out0, 0);
// }
// if (i == 1) {
// vst1q_lane_f32(output_ptr + (h_out - 1) * w_out + i, out0, 1);
// }
// if (i == 2) {
// vst1q_lane_f32(output_ptr + (h_out - 1) * w_out + i, out0, 2);
// }
// }
// // mid
// for (int j = 0; j < h_out - 2; ++j) {
// output_ptr = output_seg + w_out * (j + 1) + 1;
// input_tmp = input_seg + j * w_in;
//
// in0 = vld1q_f32(input_tmp);
// in2 = vld1q_f32(input_tmp + w_in);
// in4 = vld1q_f32(input_tmp + 2 * w_in);
// c_mid = w_out - 2;
// for (; c_mid > 3; c_mid -= 4) {
// in1 = vld1q_f32(input_tmp + 4);
// in3 = vld1q_f32(input_tmp + w_in + 4);
// in5 = vld1q_f32(input_tmp + 2 * w_in + 4);
//
// tmp0 = vextq_f32(in0, in1, 1);
// tmp1 = vextq_f32(in0, in1, 2);
// tmp2 = vextq_f32(in2, in3, 1);
// tmp3 = vextq_f32(in2, in3, 2);
// tmp4 = vextq_f32(in4, in5, 1);
// tmp5 = vextq_f32(in4, in5, 2);
//
// sum = vaddq_f32(in0, tmp0);
// sum = vaddq_f32(sum, tmp1);
// sum = vaddq_f32(sum, in2);
// sum = vaddq_f32(sum, tmp2);
// sum = vaddq_f32(sum, tmp3);
// sum = vaddq_f32(sum, in4);
// sum = vaddq_f32(sum, tmp4);
// sum = vaddq_f32(sum, tmp5);
//
// out0 = vmulq_f32(sum, v_coef);
// vst1q_f32(output_ptr, out0);
// output_ptr += 4;
// input_tmp += 4;
// in0 = in1;
// in2 = in3;
// in4 = in5;
// }
// // mid remain
// float32x4_t pad0 = vdupq_n_f32(input_seg[(j + 1) * w_in - 1]);
// float32x4_t pad1 = vdupq_n_f32(input_seg[(j + 2) * w_in - 1]);
// float32x4_t pad2 = vdupq_n_f32(input_seg[(j + 2) * w_in - 1]);
//
// tmp0 = vextq_f32(in0, pad0, 1);
// tmp1 = vextq_f32(in0, pad0, 2);
// tmp2 = vextq_f32(in2, pad1, 1);
// tmp3 = vextq_f32(in2, pad1, 2);
// tmp4 = vextq_f32(in4, pad2, 1);
// tmp5 = vextq_f32(in4, pad2, 2);
//
// sum = vaddq_f32(in0, tmp0);
// sum = vaddq_f32(sum, tmp1);
// sum = vaddq_f32(sum, in2);
// sum = vaddq_f32(sum, tmp2);
// sum = vaddq_f32(sum, tmp3);
// sum = vaddq_f32(sum, in4);
// sum = vaddq_f32(sum, tmp4);
// sum = vaddq_f32(sum, tmp5);
// out0 = vmulq_f32(sum, v_coef);
//
// for (int i = 0; i < c_mid; ++i) {
// if (i == 0) {
// vst1q_lane_f32(output_ptr + i, out0, 0);
// }
// if (i == 1) {
// vst1q_lane_f32(output_ptr + i, out0, 1);
// }
// if (i == 2) {
// vst1q_lane_f32(output_ptr + i, out0, 2);
// }
// }
// }
// // input_data += inputdata_channel_stride;
// // out_data += outputdata_channel_stride;
// }
// input_data += input_batch_stride;
// out_data += output_batch_stride;
// }
#endif
}
void Pool3x3Maxs1p1(const Tensor *input, Tensor *output) {
#if __ARM_NEON
const int batch_size = input->dims()[0];
const int h_in = input->dims()[2];
const int w_in = input->dims()[3];
const int output_channels = output->dims()[1];
const int h_out = output->dims()[2];
const int w_out = output->dims()[3];
const int outputdata_channel_stride = h_out * w_out;
const int inputdata_channel_stride = h_in * w_in;
const int input_batch_stride = output_channels * inputdata_channel_stride;
const int output_batch_stride = output_channels * outputdata_channel_stride;
float *out_data = output->mutable_data<float>();
const float *input_data = input->data<float>();
for (int k = 0; k < batch_size; ++k) {
#pragma omp parallel for
for (int c = 0; c < output_channels; ++c) {
const float *input_seg = input_data + c * inputdata_channel_stride;
float *output_seg = out_data + c * outputdata_channel_stride;
// four corner point
output_seg[0] = std::max(std::max(input_seg[0], input_seg[1]),
std::max(input_seg[w_in], input_seg[w_in + 1]));
output_seg[w_out - 1] =
std::max(std::max(input_seg[w_in - 2], input_seg[w_in - 1]),
std::max(input_seg[w_in * 2 - 2], input_seg[2 * w_in - 1]));
output_seg[(h_out - 1) * w_out] =
std::max(std::max(input_seg[(h_in - 2) * w_in],
input_seg[(h_in - 2) * w_in + 1]),
std::max(input_seg[(h_in - 1) * w_in],
input_seg[(h_in - 1) * w_in + 1]));
output_seg[h_out * w_out - 1] = std::max(
std::max(input_seg[(h_in - 1) * w_in - 1],
input_seg[(h_in - 1) * w_in - 2]),
std::max(input_seg[h_in * w_in - 1], input_seg[h_in * w_in - 2]));
// left side & right side
for (int i = 1; i < h_in - 1; ++i) {
float max1 = std::max(input_seg[i * w_in - w_in],
input_seg[i * w_in - w_in + 1]);
float max2 = std::max(input_seg[i * w_in], input_seg[i * w_in + 1]);
float max3 = std::max(input_seg[i * w_in + w_in],
input_seg[i * w_in + w_in + 1]);
output_seg[i * w_out] = std::max(std::max(max1, max2), max3);
max1 = std::max(input_seg[i * w_in - w_in + w_in - 2],
input_seg[i * w_in - w_in + 1 + w_in - 2]);
max2 = std::max(input_seg[i * w_in + w_in - 2],
input_seg[i * w_in + 1 + w_in - 2]);
max3 = std::max(input_seg[i * w_in + w_in + w_in - 2],
input_seg[i * w_in + w_in + 1 + w_in - 2]);
output_seg[i * w_out + w_out - 1] =
std::max(std::max(max1, max2), max3);
}
// top 1 row & bottom 1 row
const float *input_tmp = input_seg;
float32x4_t in0, in1, in2, in3, in4, in5, in6, in7, tmp0, tmp1, tmp2,
tmp3, tmp4, tmp5, max;
in0 = vld1q_f32(input_tmp);
in2 = vld1q_f32(input_tmp + w_in);
const float *input_tmp_end = input_tmp + (h_in - 2) * w_in;
in4 = vld1q_f32(input_tmp_end);
in6 = vld1q_f32(input_tmp_end + w_in);
int c_mid = w_out - 2;
auto output_ptr = output_seg + 1;
for (; c_mid > 3; c_mid -= 4) {
in1 = vld1q_f32(input_tmp + 4);
in3 = vld1q_f32(input_tmp + w_in + 4);
tmp0 = vextq_f32(in0, in1, 1);
tmp1 = vextq_f32(in0, in1, 2);
tmp2 = vextq_f32(in2, in3, 1);
tmp3 = vextq_f32(in2, in3, 2);
max = vmaxq_f32(in0, tmp0);
max = vmaxq_f32(max, tmp1);
max = vmaxq_f32(max, in2);
max = vmaxq_f32(max, tmp2);
max = vmaxq_f32(max, tmp3);
vst1q_f32(output_ptr, max);
in5 = vld1q_f32(input_tmp_end + 4);
in7 = vld1q_f32(input_tmp_end + w_in + 4);
tmp0 = vextq_f32(in4, in5, 1);
tmp1 = vextq_f32(in4, in5, 2);
tmp2 = vextq_f32(in6, in7, 1);
tmp3 = vextq_f32(in6, in7, 2);
max = vmaxq_f32(in4, tmp0);
max = vmaxq_f32(max, tmp1);
max = vmaxq_f32(max, in6);
max = vmaxq_f32(max, tmp2);
max = vmaxq_f32(max, tmp3);
vst1q_f32(output_ptr + (h_out - 1) * w_out, max);
input_tmp += 4;
input_tmp_end += 4;
output_ptr += 4;
in0 = in1;
in2 = in3;
in4 = in5;
in6 = in7;
}
// top right remain
float32x4_t pad0 = vdupq_n_f32(input_seg[w_in - 1]);
float32x4_t pad1 = vdupq_n_f32(input_seg[2 * w_in - 1]);
tmp0 = vextq_f32(in0, pad0, 1);
tmp1 = vextq_f32(in0, pad0, 2);
tmp2 = vextq_f32(in2, pad1, 1);
tmp3 = vextq_f32(in2, pad1, 2);
max = vmaxq_f32(in0, tmp0);
max = vmaxq_f32(max, tmp1);
max = vmaxq_f32(max, in2);
max = vmaxq_f32(max, tmp2);
max = vmaxq_f32(max, tmp3);
for (int i = 0; i < c_mid; ++i) {
if (i == 0) {
vst1q_lane_f32(output_ptr + i, max, 0);
}
if (i == 1) {
vst1q_lane_f32(output_ptr + i, max, 1);
}
if (i == 2) {
vst1q_lane_f32(output_ptr + i, max, 2);
}
}
// bottom_right remain
float32x4_t pad2 = vdupq_n_f32(input_seg[(h_in - 1) * w_in - 1]);
float32x4_t pad3 = vdupq_n_f32(input_seg[h_in * w_in - 1]);
tmp0 = vextq_f32(in4, pad2, 1);
tmp1 = vextq_f32(in4, pad2, 2);
tmp2 = vextq_f32(in6, pad3, 1);
tmp3 = vextq_f32(in6, pad3, 2);
max = vmaxq_f32(in4, tmp0);
max = vmaxq_f32(max, tmp1);
max = vmaxq_f32(max, in6);
max = vmaxq_f32(max, tmp2);
max = vmaxq_f32(max, tmp3);
for (int i = 0; i < c_mid; ++i) {
if (i == 0) {
vst1q_lane_f32(output_ptr + (h_out - 1) * w_out + i, max, 0);
}
if (i == 1) {
vst1q_lane_f32(output_ptr + (h_out - 1) * w_out + i, max, 1);
}
if (i == 2) {
vst1q_lane_f32(output_ptr + (h_out - 1) * w_out + i, max, 2);
}
}
// mid
for (int j = 0; j < h_out - 2; ++j) {
output_ptr = output_seg + (j + 1) * w_out + 1;
input_tmp = input_seg + j * w_in;
in0 = vld1q_f32(input_tmp);
in2 = vld1q_f32(input_tmp + w_in);
in4 = vld1q_f32(input_tmp + 2 * w_in);
c_mid = w_out - 2;
for (; c_mid > 3; c_mid -= 4) {
in1 = vld1q_f32(input_tmp + 4);
in3 = vld1q_f32(input_tmp + w_in + 4);
in5 = vld1q_f32(input_tmp + 2 * w_in + 4);
tmp0 = vextq_f32(in0, in1, 1);
tmp1 = vextq_f32(in0, in1, 2);
tmp2 = vextq_f32(in2, in3, 1);
tmp3 = vextq_f32(in2, in3, 2);
tmp4 = vextq_f32(in4, in5, 1);
tmp5 = vextq_f32(in4, in5, 2);
max = vmaxq_f32(in0, tmp0);
max = vmaxq_f32(max, tmp1);
max = vmaxq_f32(max, in2);
max = vmaxq_f32(max, tmp2);
max = vmaxq_f32(max, tmp3);
max = vmaxq_f32(max, in4);
max = vmaxq_f32(max, tmp4);
max = vmaxq_f32(max, tmp5);
vst1q_f32(output_ptr, max);
output_ptr += 4;
input_tmp += 4;
in0 = in1;
in2 = in3;
in4 = in5;
}
// mid remain
float32x4_t pad0 = vdupq_n_f32(input_seg[(j + 1) * w_in - 1]);
float32x4_t pad1 = vdupq_n_f32(input_seg[(j + 2) * w_in - 1]);
float32x4_t pad2 = vdupq_n_f32(input_seg[(j + 3) * w_in - 1]);
tmp0 = vextq_f32(in0, pad0, 1);
tmp1 = vextq_f32(in0, pad0, 2);
tmp2 = vextq_f32(in2, pad1, 1);
tmp3 = vextq_f32(in2, pad1, 2);
tmp4 = vextq_f32(in4, pad2, 1);
tmp5 = vextq_f32(in4, pad2, 2);
max = vmaxq_f32(in0, tmp0);
max = vmaxq_f32(max, tmp1);
max = vmaxq_f32(max, in2);
max = vmaxq_f32(max, tmp2);
max = vmaxq_f32(max, tmp3);
max = vmaxq_f32(max, in4);
max = vmaxq_f32(max, tmp4);
max = vmaxq_f32(max, tmp5);
for (int i = 0; i < c_mid; ++i) {
if (i == 0) {
vst1q_lane_f32(output_ptr + i, max, 0);
}
if (i == 1) {
vst1q_lane_f32(output_ptr + i, max, 1);
}
if (i == 2) {
vst1q_lane_f32(output_ptr + i, max, 2);
}
}
}
// input_data += inputdata_channel_stride;
// out_data += outputdata_channel_stride;
}
input_data += input_batch_stride;
out_data += output_batch_stride;
}
#else
#endif
}
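// The NEON middle loops in Pool3x3Maxs1p1/Pool3x3Avgs1p1 build the x+1 and x+2
// shifted views of each 8-wide load with vextq_f32, so a single max/add chain
// covers a full 3-wide window. The helper below isolates that trick as an
// illustration only; the name is not part of the library.
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>

// Horizontal 3-wide max for four adjacent output columns, formed the same way
// as in the loops above: two loads plus two vextq_f32 shifts.
static inline float32x4_t Horizontal3Max(const float *row) {
  float32x4_t in0 = vld1q_f32(row);         // x   .. x+3
  float32x4_t in1 = vld1q_f32(row + 4);     // x+4 .. x+7
  float32x4_t c1 = vextq_f32(in0, in1, 1);  // x+1 .. x+4
  float32x4_t c2 = vextq_f32(in0, in1, 2);  // x+2 .. x+5
  return vmaxq_f32(vmaxq_f32(in0, c1), c2);
}
#endif  // __ARM_NEON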
void Pool3x3Max(vector<int> strides, vector<int> paddings, const Tensor *input,
Tensor *output) {
#if __ARM_NEON
const int batch_size = input->dims()[0];
const int input_height = input->dims()[2];
const int input_width = input->dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
// const int _kernel_size = 3;
const int stride = strides[0];
// const int stride_width = strides[1];
const int padding = paddings[0];
// const int padding_width = paddings[1];
const float negative_max = -INT_MAX;
const int input_channel_stride = input_height * input_width;
const int output_channel_stride = output_height * output_width;
const float *input_data = input->data<float>();
float *output_data = output->mutable_data<float>();
const int input_batch_stride = output_channels * input_channel_stride;
const int output_batch_stride = output_channels * output_channel_stride;
const float *pos1, *output_ptr;
int hstart, wstart, hend, wend;
for (int i = 0; i < batch_size; ++i) {
#pragma omp parallel for
for (int c = 0; c < output_channels; ++c) {
const float *input_seg = input_data + c * input_channel_stride;
float *output_seg = output_data + c * output_channel_stride;
for (int ph = 0; ph < output_height; ph++) {
int hstart = ph * stride - padding;
int hend = min(hstart + 3, input_height);
hstart = max(hstart, 0);
for (int pw = 0; pw < output_width; pw++) {
int wstart = pw * stride - padding;
int wend = min(wstart + 3, input_width);
wstart = max(wstart, 0);
const float *pos1 = input_seg + hstart * input_width + wstart;
const float *pos2 = input_seg + (hstart + 1) * input_width + wstart;
const float *pos3 = input_seg + (hstart + 2) * input_width + wstart;
output_ptr = output_seg + ph * output_width + pw;
if (hend - hstart != 3 || wend - wstart != 3) {
float max_value = -INT_MAX;
for (int h = hstart; h < hend; h++) {
for (int w = wstart; w < wend; w++) {
float value = input_seg[h * input_width + w];
if (value > max_value) {
max_value = value;
}
}
}
output_seg[ph * output_width + pw] = max_value;
} else {
#if __aarch64__
const float32x4_t data1 = vld1q_f32(pos1);
const float32x4_t data2 = vld1q_f32(pos1 + input_width);
const float32x4_t data3 = vld1q_f32(pos1 + 2 * input_width);
const float32x4_t max_data =
vmaxq_f32(vmaxq_f32(data1, data2), data3);
float32x2_t res =
vpmax_f32(vget_high_f32(vsetq_lane_f32(-INT_MAX, max_data, 3)),
vget_low_f32(max_data));
res = vpmax_f32(res, res);
output_seg[ph * output_width + pw] = vget_lane_f32(res, 0);
#else
asm volatile(
"vld1.32 {q1}, [%[pos1]] \n\t"
"vld1.32 {q2}, [%[pos2]] \n\t"
"vld1.32 {q3}, [%[pos3]] \n\t"
"vmax.f32 q1, q1, q2 \n\t"
"vmax.f32 q2, q1, q3 \n\t"
"vmov.f32 d5[1], %[negative_max] \n\t"
"vpmax.f32 d6, d4, d5 \n\t"
"vpmax.f32 d7, d6, d6 \n\t"
"vst1.32 {d7[0]},[%[output_ptr]] \n\t"
:
: [input_seg] "r"(input_seg), [pos1] "r"(pos1),
[pos2] "r"(pos2), [pos3] "r"(pos3),
[output_ptr] "r"(output_ptr), [negative_max] "r"(negative_max)
: "memory", "q1", "q2", "q3", "q4");
#endif
}
}
}
}
input_data += input_batch_stride;
output_data += output_batch_stride;
}
#endif
}
void Pool3x3Avg(vector<int> strides, vector<int> paddings, const Tensor *input,
Tensor *output) {
#if __ARM_NEON
const int batch_size = input->dims()[0];
const int input_height = input->dims()[2];
const int input_width = input->dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const int stride = strides[0];
const int padding = paddings[0];
const int input_channel_stride = input_height * input_width;
const int output_channel_stride = output_height * output_width;
const float *input_data = input->data<float>();
float *output_data = output->mutable_data<float>();
const float zero = 0;
const float nine = 1.0 / 9.0;
const float nine_ptr[] = {nine, nine};
const int input_batch_stride = output_channels * input_channel_stride;
const int output_batch_stride = output_channels * output_channel_stride;
for (int i = 0; i < batch_size; ++i) {
#pragma omp parallel for
for (int c = 0; c < output_channels; ++c) {
const float *input_seg = input_data + c * input_channel_stride;
float *output_seg = output_data + c * output_channel_stride;
for (int ph = 0; ph < output_height; ph++) {
for (int pw = 0; pw < output_width; pw++) {
int hstart = ph * stride - padding;
int wstart = pw * stride - padding;
int hend = min(hstart + 3, input_height + padding);
int wend = min(wstart + 3, input_width + padding);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
hend = min(hend, input_height);
wend = min(wend, input_width);
const float *pos1 = input_seg + hstart * input_width + wstart;
const float *pos2 = input_seg + (hstart + 1) * input_width + wstart;
const float *pos3 = input_seg + (hstart + 2) * input_width + wstart;
float *output_ptr = output_seg + ph * output_width + pw;
if (hend - hstart != 3 || wend - wstart != 3) {
float sum = 0;
for (int h = hstart; h < hend; h++) {
for (int w = wstart; w < wend; w++) {
sum += input_seg[h * input_width + w];
}
}
output_seg[ph * output_width + pw] =
sum / ((hend - hstart) * (wend - wstart) * 1.0);
} else {
#if __aarch64__
            const float32x4_t data1 = vld1q_f32(pos1);
            const float32x4_t data2 = vld1q_f32(pos2);
            const float32x4_t data3 = vld1q_f32(pos3);
            const float32x4_t sum_data =
                vaddq_f32(vaddq_f32(data1, data3), data2);
            float32x2_t res =
                vpadd_f32(vget_high_f32(vsetq_lane_f32(0, sum_data, 3)),
                          vget_low_f32(sum_data));
            res = vpadd_f32(res, res);
            output_seg[ph * output_width + pw] = vget_lane_f32(res, 0) / 9.0;
#else
            // On armv7 the inline assembly writes the averaged value directly
            // through output_ptr, so the intrinsic path above is not repeated.
            asm volatile(
                "vld1.32 {q1}, [%[pos1]] \n\t"
                "vld1.32 {q2}, [%[pos2]] \n\t"
                "vld1.32 {q3}, [%[pos3]] \n\t"
                "vadd.f32 q1, q1, q2 \n\t"
                "vadd.f32 q2, q1, q3 \n\t"
                "vmov.f32 d5[1], %[zero] \n\t"
                "vpadd.f32 d6, d4, d5 \n\t"
                "vpadd.f32 d6, d6, d6 \n\t"
                "vld1.f32 d7, [%[nine_ptr]]! \n\t"
                "vmul.f32 d6,d7 \n\t"
                "vst1.32 {d6[0]},[%[output_ptr]] \n\t"
                :
                : [input_seg] "r"(input_seg), [pos1] "r"(pos1),
                  [pos2] "r"(pos2), [pos3] "r"(pos3),
                  [output_ptr] "r"(output_ptr), [zero] "r"(zero),
                  [nine_ptr] "r"(nine_ptr)
                : "memory", "r6", "q1", "q2", "q3", "q4");
#endif
}
}
}
}
input_data += input_batch_stride;
output_data += output_batch_stride;
}
#else
#endif
}
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#pragma once
#ifdef _OPENMP
#include <omp.h>
#endif
#include <algorithm>
#include <vector>
#include "framework/tensor.h"
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
namespace paddle_mobile {
namespace operators {
namespace math {
void Pool3x3Avgs1p1(const framework::Tensor *input, framework::Tensor *output);
void Pool3x3Maxs1p1(const framework::Tensor *input, framework::Tensor *output);
void Pool3x3Max(std::vector<int> strides, std::vector<int> paddings,
const framework::Tensor *input, framework::Tensor *output);
void Pool3x3Avg(std::vector<int> strides, std::vector<int> paddings,
const framework::Tensor *in_x, framework::Tensor *out);
void Pool3x3Maxs1_int8(const framework::Tensor *input,
framework::Tensor *output, int32_t pad_h, int32_t pad_w);
void Pool3x3Maxs2_int8(const framework::Tensor *input,
framework::Tensor *output, int32_t pad_h, int32_t pad_w);
void Pool3x3Max_int8(const std::vector<int> &strides,
const std::vector<int> &paddings,
const framework::Tensor *input, framework::Tensor *output);
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif
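// Illustrative dispatch sketch (an assumption for this note, not code from the
// patch): a pooling kernel is expected to pick one of the declarations above
// based on element type, stride and padding, roughly like this:
//
//   if (is_int8) {
//     if (stride == 1) {
//       math::Pool3x3Maxs1_int8(&input, &output, pad_h, pad_w);
//     } else if (stride == 2) {
//       math::Pool3x3Maxs2_int8(&input, &output, pad_h, pad_w);
//     } else {
//       math::Pool3x3Max_int8(strides, paddings, &input, &output);
//     }
//   } else {
//     math::Pool3x3Max(strides, paddings, &input, &output);  // float path
//   }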
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#ifdef _OPENMP
#include <omp.h>
#endif
#include "framework/tensor.h"
#include "operators/math/pool_3x3.h"
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
#include <algorithm>
#include <climits>
#include <cstring>
#include <iostream>
namespace paddle_mobile {
namespace operators {
namespace math {
using framework::Tensor;
using std::max;
using std::min;
using std::vector;
template <typename T>
static void make_paddings(const Tensor *input, Tensor *padded_input,
int32_t top, int32_t bottom, int32_t left,
int32_t right, T value) {
const int32_t batch_size = input->dims()[0];
const int32_t c_in = input->dims()[1];
const int32_t h_in = input->dims()[2];
const int32_t w_in = input->dims()[3];
const int32_t h_padded = h_in + top + bottom;
const int32_t w_padded = w_in + left + right;
padded_input->Resize({batch_size, c_in, h_padded, w_padded});
T *padded_input_data = padded_input->mutable_data<T>();
const T *input_data = input->data<T>();
const int32_t input_channel_stride = h_in * w_in;
const int32_t input_batch_stride = c_in * input_channel_stride;
const int32_t padded_channel_stride = h_padded * w_padded;
const int32_t padded_batch_stride = c_in * padded_channel_stride;
for (int i = 0; i < batch_size; ++i) {
#pragma omp parallel for
for (int j = 0; j < c_in; ++j) {
const T *img_in = input_data + j * input_channel_stride;
T *img_padded = padded_input_data + j * padded_channel_stride;
int k = 0;
for (; k < top; ++k) {
for (int l = 0; l < w_padded; ++l) {
img_padded[l] = value;
}
img_padded += w_padded;
}
for (; k < top + h_in; ++k) {
int l = 0;
for (; l < left; ++l) {
img_padded[l] = value;
}
memcpy(img_padded + left, img_in, w_in * sizeof(T));
l += w_in;
img_in += w_in;
for (; l < w_padded; ++l) {
img_padded[l] = value;
}
img_padded += w_padded;
}
for (; k < h_padded; ++k) {
for (int l = 0; l < w_padded; ++l) {
img_padded[l] = value;
}
img_padded += w_padded;
}
}
input_data += input_batch_stride;
padded_input_data += padded_batch_stride;
}
// input_data = input->data<T>();
// std::cout << "+++++++++++++++++++Origin begin++++++++++++++++++++"
// << std::endl;
// for (int i = 0; i < 1; ++i) {
// for (int j = 0; j < 1; ++j) {
// const T *img_in = input_data + j * input_channel_stride;
// for (int k = 0; k < h_in; ++k) {
// for (int l = 0; l < w_in; ++l) {
// std::cout << (int32_t)*img_in << "\t";
// img_in++;
// }
// std::cout << std::endl;
// }
// }
// input_data += input_batch_stride;
// }
// std::cout << "+++++++++++++++++++Origin end++++++++++++++++++++" <<
// std::endl;
//
// padded_input_data = padded_input->data<T>();
// std::cout << "******************Padding begin**********************"
// << std::endl;
// for (int i = 0; i < 1; ++i) {
// for (int j = 0; j < 1; ++j) {
// T *img_padded = padded_input_data + j * padded_channel_stride;
// for (int k = 0; k < h_padded; ++k) {
// for (int l = 0; l < w_padded; ++l) {
// std::cout << (int32_t)*img_padded << "\t";
// img_padded++;
// }
// std::cout << std::endl;
// }
// }
// padded_input_data += padded_batch_stride;
// }
// std::cout << "******************Padding end**********************"
// << std::endl;
}
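// Quick sanity check for make_paddings (descriptive): a batch_size x c_in x
// h_in x w_in tensor padded with (top, bottom, left, right) becomes
// batch_size x c_in x (h_in + top + bottom) x (w_in + left + right); the
// border cells are filled with `value` and each interior row is copied with a
// single memcpy, so e.g. a 4x4 image with all-1 padding becomes a 6x6 image.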
void Pool3x3Maxs1_int8(const Tensor *input, Tensor *output, int32_t pad_h,
int32_t pad_w) {
Tensor padded_input;
if (pad_h != 0 && pad_w != 0) {
int8_t value = -SCHAR_MAX;
make_paddings(input, &padded_input, pad_h, pad_h, pad_w, pad_w, value);
input = &padded_input;
}
const int32_t batch_size = input->dims()[0];
const int32_t h_in = input->dims()[2];
const int32_t w_in = input->dims()[3];
const int8_t *input_data = input->data<int8_t>();
const int32_t output_channels = output->dims()[1];
const int32_t h_out = output->dims()[2];
const int32_t w_out = output->dims()[3];
int8_t *output_data = output->mutable_data<int8_t>();
const int32_t outputdata_channel_stride = h_out * w_out;
const int32_t inputdata_channel_stride = h_in * w_in;
const int32_t input_batch_stride = output_channels * inputdata_channel_stride;
const int32_t output_batch_stride =
output_channels * outputdata_channel_stride;
// std::cout << "h_out = " << h_out << ", w_out=" << w_out << std::endl;
for (int i = 0; i < batch_size; ++i) {
#pragma omp parallel for
for (int j = 0; j < output_channels; ++j) {
const int8_t *img_in = input_data + j * inputdata_channel_stride;
int8_t *img_out = output_data + j * outputdata_channel_stride;
for (int k = 0; k < h_out; ++k) {
const int8_t *row0 = img_in + k * w_in;
const int8_t *row1 = img_in + (k + 1) * w_in;
const int8_t *row2 = img_in + (k + 2) * w_in;
#if __ARM_NEON
int32_t nw = w_out >> 4;
int32_t left_w = w_out & 0xf;
int32_t nw1 = left_w >> 3;
int32_t left_w1 = left_w & 0x7;
#if __aarch64__
// TODO: add an aarch64 implementation; only the armv7 inline assembly below exists.
#else
if (nw > 0) {
#define LOOP_LABEL "1"
// result: q15
asm volatile(
"vld1.8 {q0}, [%[row0]]! \n\t" // q0=0-15
"vld1.8 {q2}, [%[row1]]! \n\t"
"vld1.8 {q4}, [%[row2]]! \n\t"
LOOP_LABEL
": \n\t"
"vld1.8 {q1}, [%[row0]]! \n\t" // q1=16-31
"vext.8 q6, q0, q1, #1 \n\t"
"vext.8 q7, q0, q1, #2 \n\t"
"vld1.8 {q3}, [%[row1]]! \n\t"
"vmax.s8 q15, q0, q6 \n\t"
"vmax.s8 q15, q15, q7 \n\t"
"vext.8 q6, q2, q3, #1 \n\t"
"vext.8 q7, q2, q3, #2 \n\t"
"vld1.8 {q5}, [%[row2]]! \n\t"
"vmax.s8 q14, q2, q6 \n\t"
"vmax.s8 q14, q14, q7 \n\t"
"vext.8 q6, q4, q5, #1 \n\t"
"vext.8 q7, q4, q5, #2 \n\t"
"vmax.s8 q13, q4, q6 \n\t"
"vmax.s8 q13, q13, q7 \n\t"
"vmax.s8 q15, q15, q14 \n\t"
"vmax.s8 q15, q15, q13 \n\t"
"vmov.s8 q0, q1 \n\t"
"vmov.s8 q2, q3 \n\t"
"vmov.s8 q4, q5 \n\t"
"vst1.8 {q15}, [%[img_out]]! \n\t"
"subs %[nw], #1 \n\t"
"bne " LOOP_LABEL
"b \n\t"
"sub %[row0], #16 \n\t"
"sub %[row1], #16 \n\t"
"sub %[row2], #16 \n\t"
: [nw] "+r"(nw), [row0] "+r"(row0), [row1] "+r"(row1),
[row2] "+r"(row2), [img_out] "+r"(img_out)
:
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
"q13", "q14", "q15");
#undef LOOP_LABEL
}
if (nw1 > 0 || left_w1 > 0) {
#define PADDLE_LABEL_LESS8 "1"
#define PADDLE_LABEL_LESS8_SAVE "2"
#define PADDLE_LABEL_OVER "3"
// result: d15
asm volatile(
"vld1.8 {d0}, [%[row0]]! \n\t" // d0=0-8
"vld1.8 {d2}, [%[row1]]! \n\t"
"vld1.8 {d4}, [%[row2]]! \n\t"
"mov r0, #1 \n\t"
"cmp %[nw1], #0 \n\t"
"beq " PADDLE_LABEL_LESS8
"f\n\t"
"vld1.8 {d1}, [%[row0]]! \n\t" // d1=9-15
"vext.8 d6, d0, d1, #1 \n\t"
"vext.8 d7, d0, d1, #2 \n\t"
"vld1.8 {d3}, [%[row1]]! \n\t"
"vmax.s8 d15, d0, d6 \n\t"
"vmax.s8 d15, d15, d7 \n\t"
"vext.8 d6, d2, d3, #1 \n\t"
"vext.8 d7, d2, d3, #2 \n\t"
"vld1.8 {d5}, [%[row2]]! \n\t"
"vmax.s8 d14, d2, d6 \n\t"
"vmax.s8 d14, d14, d7 \n\t"
"vext.8 d6, d4, d5, #1 \n\t"
"vext.8 d7, d4, d5, #2 \n\t"
"vmax.s8 d13, d4, d6 \n\t"
"vmax.s8 d13, d13, d7 \n\t"
"vmax.s8 d15, d15, d14 \n\t"
"vmax.s8 d15, d15, d13 \n\t"
"vmov.s8 d0, d1 \n\t"
"vmov.s8 d2, d3 \n\t"
"vmov.s8 d4, d5 \n\t"
"vst1.8 {d15}, [%[img_out]]! \n\t"
PADDLE_LABEL_LESS8
": \n\t"
"cmp %[left_w1], #0 \n\t"
"beq " PADDLE_LABEL_OVER
"f\n\t"
"vld1.8 {d1}, [%[row0]] \n\t" // d1=9-15
"vext.8 d6, d0, d1, #1 \n\t"
"vext.8 d7, d0, d1, #2 \n\t"
"vld1.8 {d3}, [%[row1]] \n\t"
"vmax.s8 d15, d0, d6 \n\t"
"vmax.s8 d15, d15, d7 \n\t"
"vext.8 d6, d2, d3, #1 \n\t"
"vext.8 d7, d2, d3, #2 \n\t"
"vld1.8 {d5}, [%[row2]] \n\t"
"vmax.s8 d14, d2, d6 \n\t"
"vmax.s8 d14, d14, d7 \n\t"
"vext.8 d6, d4, d5, #1 \n\t"
"vext.8 d7, d4, d5, #2 \n\t"
"vmax.s8 d13, d4, d6 \n\t"
"vmax.s8 d13, d13, d7 \n\t"
"vmax.s8 d15, d15, d14 \n\t"
"vmax.s8 d15, d15, d13 \n\t"
PADDLE_LABEL_LESS8_SAVE
": \n\t"
"vst1.8 {d15[0]}, [%[img_out]], r0\n\t"
"add %[row0], %[row0], #1 \n\t"
"add %[row1], %[row1], #1 \n\t"
"add %[row2], %[row2], #1 \n\t"
"vext.8 d15, d15, d15, #1 \n\t"
"subs %[left_w1], #1 \n\t"
"bgt " PADDLE_LABEL_LESS8_SAVE "b \n\t"
PADDLE_LABEL_OVER ": \n\t"
: [nw1] "+r"(nw1), [left_w1] "+r"(left_w1), [row0] "+r"(row0),
[row1] "+r"(row1), [row2] "+r"(row2), [img_out] "+r"(img_out)
:
: "cc", "memory", "r0", "d0", "d1", "d2", "d3", "d4", "d5", "d6",
"d7", "d13", "d14", "d15");
#undef PADDLE_LABEL_OVER
#undef PADDLE_LABEL_LESS8_SAVE
#undef PADDLE_LABEL_LESS8
}
#endif // __aarch64__
#else
int32_t left = w_out;
while (left > 0) {
const int8_t max0 = std::max(std::max(row0[0], row0[1]), row0[2]);
const int8_t max1 = std::max(std::max(row1[0], row1[1]), row1[2]);
const int8_t max2 = std::max(std::max(row2[0], row2[1]), row2[2]);
*img_out = std::max(std::max(max0, max1), max2);
row0 += 1;
row1 += 1;
row2 += 1;
img_out++;
left--;
}
#endif // __ARM_NEON
}
}
input_data += input_batch_stride;
output_data += output_batch_stride;
}
}
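// Width tiling used above (descriptive): each output row is split as
// w_out = 16 * nw + 8 * nw1 + left_w1, e.g. w_out = 27 gives nw = 1 (one
// 16-wide NEON tile), nw1 = 1 (one 8-wide tile) and left_w1 = 3 trailing
// outputs that the second asm block stores one byte at a time.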
void Pool3x3Maxs2_int8(const Tensor *input, Tensor *output, int32_t pad_h,
int32_t pad_w) {
Tensor padded_input;
if (pad_h != 0 && pad_w != 0) {
int8_t value = -SCHAR_MAX;
make_paddings(input, &padded_input, pad_h, pad_h, pad_w, pad_w, value);
input = &padded_input;
}
const int32_t batch_size = input->dims()[0];
const int32_t h_in = input->dims()[2];
const int32_t w_in = input->dims()[3];
const int32_t output_channels = output->dims()[1];
const int32_t h_out = output->dims()[2];
const int32_t w_out = output->dims()[3];
const int32_t outputdata_channel_stride = h_out * w_out;
const int32_t inputdata_channel_stride = h_in * w_in;
const int32_t output_batch_stride =
output_channels * outputdata_channel_stride;
const int32_t input_batch_stride = output_channels * inputdata_channel_stride;
const int8_t *input_data = input->data<int8_t>();
int8_t *output_data = output->mutable_data<int8_t>();
for (int i = 0; i < batch_size; ++i) {
#pragma omp parallel for
for (int j = 0; j < output_channels; ++j) {
const int8_t *img_in = input_data + j * inputdata_channel_stride;
int8_t *img_out = output_data + j * outputdata_channel_stride;
for (int k = 0; k < h_out; ++k) {
const int8_t *row0 = img_in + 2 * k * w_in;
const int8_t *row1 = img_in + (2 * k + 1) * w_in;
const int8_t *row2 = img_in + (2 * k + 2) * w_in;
#if __ARM_NEON
int32_t nw = w_out >> 4;
int32_t left_w = w_out & 0xf;
int32_t nw1 = left_w >> 3;
int32_t left_w1 = left_w & 0x7;
#if __aarch64__
// TODO: add an aarch64 implementation; only the armv7 inline assembly below exists.
#else
if (nw > 0) {
#define LOOP_LABEL "1"
// result: q15
asm volatile(
"vld2.8 {q0, q1}, [%[row0]]! \n\t" // q0=0-30, q1=1-31
"vld2.8 {q2, q3}, [%[row1]]! \n\t"
"vld2.8 {q4, q5}, [%[row2]]! \n\t"
LOOP_LABEL
": \n\t"
"vmax.s8 q15, q0, q1 \n\t"
"vld2.8 {q6, q7}, [%[row0]]! \n\t" // q0=32-62, q1=33-63
"vmax.s8 q14, q2, q3 \n\t"
"vmax.s8 q13, q4, q5 \n\t"
"vld2.8 {q8, q9}, [%[row1]]! \n\t"
"vext.8 q0, q0, q6, #1 \n\t"
"vmax.s8 q15, q15, q0 \n\t"
"vld2.8 {q10, q11}, [%[row2]]! \n\t"
"vext.8 q2, q2, q8, #1 \n\t"
"vmax.s8 q14, q14, q2 \n\t"
"vext.8 q4, q4, q10, #1 \n\t"
"vmax.s8 q13, q13, q4 \n\t"
"vmax.s8 q15, q15, q14 \n\t"
"vmax.s8 q15, q15, q13 \n\t"
"vmov.s8 q0, q6 \n\t"
"vmov.s8 q1, q7 \n\t"
"vmov.s8 q2, q8 \n\t"
"vmov.s8 q3, q9 \n\t"
"vmov.s8 q4, q10 \n\t"
"vmov.s8 q5, q11 \n\t"
"vst1.8 {q15}, [%[img_out]]! \n\t"
"subs %[nw], #1 \n\t"
"bne " LOOP_LABEL
"b \n\t"
"sub %[row0], #32 \n\t"
"sub %[row1], #32 \n\t"
"sub %[row2], #32 \n\t"
: [nw] "+r"(nw), [row0] "+r"(row0), [row1] "+r"(row1),
[row2] "+r"(row2), [img_out] "+r"(img_out)
:
: "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
"q8", "q9", "q10", "q11", "q13", "q14", "q15");
#undef LOOP_LABEL
}
if (nw1 > 0 || left_w1 > 0) {
#define PADDLE_LABEL_LESS8 "1"
#define PADDLE_LABEL_LESS8_SAVE "2"
#define PADDLE_LABEL_OVER "3"
// result: d15
asm volatile(
"vld2.8 {d0, d1}, [%[row0]]! \n\t" // d0=0-14, d1=1-15
"vld2.8 {d2, d3}, [%[row1]]! \n\t"
"vld2.8 {d4, d5}, [%[row2]]! \n\t"
"mov r0, #1 \n\t"
"cmp %[nw1], #0 \n\t"
"beq " PADDLE_LABEL_LESS8
"f\n\t"
"vmax.s8 d15, d0, d1 \n\t"
"vld2.8 {d6, d7}, [%[row0]]! \n\t" // d0=32-62, d1=33-63
"vmax.s8 d14, d2, d3 \n\t"
"vmax.s8 d13, d4, d5 \n\t"
"vld2.8 {d8, d9}, [%[row1]]! \n\t"
"vext.8 d0, d0, d6, #1 \n\t"
"vmax.s8 d15, d15, d0 \n\t"
"vld2.8 {d10, d11}, [%[row2]]! \n\t"
"vext.8 d2, d2, d8, #1 \n\t"
"vmax.s8 d14, d14, d2 \n\t"
"vext.8 d4, d4, d10, #1 \n\t"
"vmax.s8 d13, d13, d4 \n\t"
"vmax.s8 d15, d15, d14 \n\t"
"vmax.s8 d15, d15, d13 \n\t"
"vmov.s8 d0, d6 \n\t"
"vmov.s8 d1, d7 \n\t"
"vmov.s8 d2, d8 \n\t"
"vmov.s8 d3, d9 \n\t"
"vmov.s8 d4, d10 \n\t"
"vmov.s8 d5, d11 \n\t"
"vst1.8 {d15}, [%[img_out]]! \n\t"
PADDLE_LABEL_LESS8
": \n\t"
"cmp %[left_w1], #0 \n\t"
"beq " PADDLE_LABEL_OVER
"f\n\t"
"vmax.s8 d15, d0, d1 \n\t"
"vld2.8 {d6, d7}, [%[row0]] \n\t" // d0=32-62, d1=33-63
"vmax.s8 d14, d2, d3 \n\t"
"vmax.s8 d13, d4, d5 \n\t"
"vld2.8 {d8, d9}, [%[row1]] \n\t"
"vext.8 d0, d0, d6, #1 \n\t"
"vmax.s8 d15, d15, d0 \n\t"
"vld2.8 {d10, d11}, [%[row2]] \n\t"
"vext.8 d2, d2, d8, #1 \n\t"
"vmax.s8 d14, d14, d2 \n\t"
"vext.8 d4, d4, d10, #1 \n\t"
"vmax.s8 d13, d13, d4 \n\t"
"vmax.s8 d15, d15, d14 \n\t"
"vmax.s8 d15, d15, d13 \n\t"
PADDLE_LABEL_LESS8_SAVE
": \n\t"
"vst1.8 {d15[0]}, [%[img_out]], r0\n\t"
"add %[row0], %[row0], #2 \n\t"
"add %[row1], %[row1], #2 \n\t"
"add %[row2], %[row2], #2 \n\t"
"vext.8 d15, d15, d15, #1 \n\t"
"subs %[left_w1], #1 \n\t"
"bgt " PADDLE_LABEL_LESS8_SAVE "b \n\t"
PADDLE_LABEL_OVER ": \n\t"
: [nw1] "+r"(nw1), [left_w1] "+r"(left_w1), [row0] "+r"(row0),
[row1] "+r"(row1), [row2] "+r"(row2), [img_out] "+r"(img_out)
:
: "cc", "memory", "r0", "d0", "d1", "d2", "d3", "d4", "d5", "d6",
"d7", "d8", "d9", "d10", "d11", "d13", "d14", "d15");
#undef PADDLE_LABEL_OVER
#undef PADDLE_LABEL_LESS8_SAVE
#undef PADDLE_LABEL_LESS8
}
#endif // __aarch64__
#else
int32_t left = w_out;
while (left > 0) {
const int8_t max0 = std::max(std::max(row0[0], row0[1]), row0[2]);
const int8_t max1 = std::max(std::max(row1[0], row1[1]), row1[2]);
const int8_t max2 = std::max(std::max(row2[0], row2[1]), row2[2]);
*img_out = std::max(std::max(max0, max1), max2);
row0 += 2;
row1 += 2;
row2 += 2;
img_out++;
left--;
}
#endif // __ARM_NEON
}
}
input_data += input_batch_stride;
output_data += output_batch_stride;
}
}
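// Descriptive note on the stride-2 kernel above: vld2.8 de-interleaves even
// and odd input columns during the load, so the horizontal max of each 3-wide
// window needs only one vext.8 per row; the row pointers are rewound by 32
// bytes after the 16-wide loop because that loop pre-loads the next tile.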
void Pool3x3Max_int8(const vector<int> &strides, const vector<int> &paddings,
const Tensor *input, Tensor *output) {
const int batch_size = input->dims()[0];
const int input_height = input->dims()[2];
const int input_width = input->dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
// const int _kernel_size = 3;
const int stride = strides[0];
// const int stride_width = strides[1];
const int padding = paddings[0];
// const int padding_width = paddings[1];
const int8_t negative_max = -SCHAR_MAX;
const int input_channel_stride = input_height * input_width;
const int output_channel_stride = output_height * output_width;
const int8_t *input_data = input->data<int8_t>();
int8_t *output_data = output->mutable_data<int8_t>();
const int input_batch_stride = output_channels * input_channel_stride;
const int output_batch_stride = output_channels * output_channel_stride;
for (int i = 0; i < batch_size; ++i) {
#pragma omp parallel for
for (int c = 0; c < output_channels; ++c) {
const int8_t *input_seg = input_data + c * input_channel_stride;
int8_t *output_seg = output_data + c * output_channel_stride;
for (int ph = 0; ph < output_height; ph++) {
int hstart = ph * stride - padding;
int hend = min(hstart + 3, input_height);
hstart = max(hstart, 0);
for (int pw = 0; pw < output_width; pw++) {
int wstart = pw * stride - padding;
int wend = min(wstart + 3, input_width);
wstart = max(wstart, 0);
const int8_t *pos1 = input_seg + hstart * input_width + wstart;
const int8_t *pos2 = input_seg + (hstart + 1) * input_width + wstart;
const int8_t *pos3 = input_seg + (hstart + 2) * input_width + wstart;
int8_t *output_ptr = output_seg + ph * output_width + pw;
if (hend - hstart != 3 || wend - wstart != 3) {
int8_t max_value = -SCHAR_MAX;
for (int h = hstart; h < hend; h++) {
for (int w = wstart; w < wend; w++) {
int8_t value = input_seg[h * input_width + w];
if (value > max_value) {
max_value = value;
}
}
}
output_seg[ph * output_width + pw] = max_value;
} else {
#if __ARM_NEON
#if __aarch64__
// TODO: add an aarch64 implementation; only the armv7 inline assembly below exists.
#else
asm volatile(
"vld1.8 {d0}, [%[pos1]] \n\t"
"vld1.8 {d1}, [%[pos2]] \n\t"
"vld1.8 {d2}, [%[pos3]] \n\t"
"vmax.s8 d3, d0, d1 \n\t"
"vmax.s8 d4, d2, d3 \n\t"
"vmov.s8 d4[3], %[negative_max] \n\t"
"vpmax.s8 d5, d4, d4 \n\t"
"vpmax.s8 d6, d5, d5 \n\t"
"vst1.8 {d6[0]},[%[output_ptr]] \n\t"
:
: [pos1] "r"(pos1), [pos2] "r"(pos2), [pos3] "r"(pos3),
[output_ptr] "r"(output_ptr), [negative_max] "r"(negative_max)
: "memory", "q0", "q1", "q2", "q3");
#endif
#else
const int8_t max0 = std::max(std::max(pos1[0], pos1[1]), pos1[2]);
const int8_t max1 = std::max(std::max(pos2[0], pos2[1]), pos2[2]);
const int8_t max2 = std::max(std::max(pos3[0], pos3[1]), pos3[2]);
*output_ptr = std::max(std::max(max0, max1), max2);
#endif // __ARM_NEON
}
}
}
}
input_data += input_batch_stride;
output_data += output_batch_stride;
}
}
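// Illustrative scalar reference (an assumption, not part of this patch): the
// NEON kernels above are expected to match a naive 3x3 max pool such as the
// one sketched below, which is handy for unit-test comparison.
//
//   void NaivePool3x3Max(const int8_t *in, int8_t *out, int h_in, int w_in,
//                        int h_out, int w_out, int stride, int pad) {
//     for (int oh = 0; oh < h_out; ++oh) {
//       for (int ow = 0; ow < w_out; ++ow) {
//         int8_t best = -127;  // same sentinel convention as the kernels
//         for (int kh = 0; kh < 3; ++kh) {
//           for (int kw = 0; kw < 3; ++kw) {
//             int ih = oh * stride - pad + kh;
//             int iw = ow * stride - pad + kw;
//             if (ih >= 0 && ih < h_in && iw >= 0 && iw < w_in) {
//               best = std::max(best, in[ih * w_in + iw]);
//             }
//           }
//         }
//         out[oh * w_out + ow] = best;
//       }
//     }
//   }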
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif
...@@ -14,53 +14,42 @@ limitations under the License. */
#ifdef POOL_OP
#include "operators/math/pooling.h"
#include "common/types.h"
#ifdef _OPENMP
#include <omp.h>
#endif
namespace paddle_mobile {
namespace operators {
namespace math {
template <PoolingType P>
void Pooling<P>::operator()(const framework::Tensor &input,
                            const std::vector<int> &kernel_size,
                            const std::vector<int> &strides,
                            const std::vector<int> &paddings,
                            framework::Tensor *output) {
  const int batch_size = input.dims()[0];
  const int input_height = input.dims()[2];
  const int input_width = input.dims()[3];
  const int output_channels = output->dims()[1];
  const int output_height = output->dims()[2];
  const int output_width = output->dims()[3];
  const int ksize_height = kernel_size[0];
  const int ksize_width = kernel_size[1];
  const int stride_height = strides[0];
  const int stride_width = strides[1];
  const int padding_height = paddings[0];
  const int padding_width = paddings[1];
  const float *input_data = input.data<float>();
  float *output_data = output->mutable_data<float>();
  const size_t input_spatial_size = input_height * input_width;
  const size_t output_spatial_size = output_height * output_width;
  #pragma omp parallel for collapse(2)
  for (int i = 0; i < batch_size; i++) {
    for (int c = 0; c < output_channels; ++c) {
      int channel = i * output_channels + c;
      const float *input_ptr = input_data + channel * input_spatial_size;
      float *output_ptr = output_data + channel * output_spatial_size;
      for (int ph = 0; ph < output_height; ++ph) {
        int hstart = ph * stride_height - padding_height;
        int hend = std::min(hstart + ksize_height, input_height);
...@@ -70,30 +59,24 @@ class PoolFunctor<CPU, PoolProcess, T> {
        int wend = std::min(wstart + ksize_width, input_width);
        wstart = std::max(wstart, 0);
        PoolingVal<P> val;
        for (int h = hstart; h < hend; ++h) {
          for (int w = wstart; w < wend; ++w) {
            val += input_ptr[h * input_width + w];
          }
        }
        output_ptr[ph * output_width + pw] = val.Value();
      }
    }
  }
}
template struct Pooling<MAX>;
template struct Pooling<AVG>;
}  // namespace math
}  // namespace operators
}  // namespace paddle_mobile
#endif  // POOL_OP
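// Illustrative use of the functor defined above (an assumption for this note,
// not code from the patch): the pool kernel is expected to instantiate it
// roughly like this for a 3x3 / stride-1 / pad-1 window over NCHW tensors.
//
//   framework::Tensor input, output;  // already resized to matching NCHW dims
//   std::vector<int> ksize = {3, 3}, strides = {1, 1}, paddings = {1, 1};
//   math::Pooling<MAX>()(input, ksize, strides, paddings, &output);
//   math::Pooling<AVG>()(input, ksize, strides, paddings, &output);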
...@@ -16,75 +16,138 @@ limitations under the License. */
#pragma once
#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>
#include "common/types.h"
#include "framework/tensor.h"
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
namespace math {
template <PoolingType P = MAX>
struct PoolingVal {
  float val;
  int count;
  PoolingVal() : count(0) { val = -std::numeric_limits<float>::max(); }
  inline PoolingVal<P> &operator+=(const float &x) {
    val = std::max(val, x);
    ++count;
    return *this;
  }
  inline float Value() { return (count > 0) ? val : 0.f; }
};
template <>
struct PoolingVal<AVG> {
  float val;
  int count;
  PoolingVal() : val(0.f), count(0) {}
  inline PoolingVal<AVG> &operator+=(const float &x) {
    val += x;
    ++count;
    return *this;
  }
  inline float Value() { return (count > 0) ? val / count : 0.f; }
};
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
template <PoolingType P = MAX>
inline float32x4_t vPoolInitq_f32() {
  return vdupq_n_f32(-std::numeric_limits<float>::max());
}
template <>
inline float32x4_t vPoolInitq_f32<AVG>() {
  return vdupq_n_f32(0.f);
}
template <PoolingType P = MAX>
inline float32x4_t vPoolPreq_f32(const float32x4_t &x1, const float32x4_t &x2) {
  return vmaxq_f32(x1, x2);
}
template <>
inline float32x4_t vPoolPreq_f32<AVG>(const float32x4_t &x1,
                                      const float32x4_t &x2) {
  return vaddq_f32(x1, x2);
}
template <PoolingType P = MAX>
inline float32x4_t vPoolPostq_f32(const float32x4_t &x,
                                  const float32x4_t &post) {
  return x;
}
template <>
inline float32x4_t vPoolPostq_f32<AVG>(const float32x4_t &x,
                                       const float32x4_t &post) {
  return vmulq_f32(x, post);
}
#endif  // __ARM_NEON__
template <PoolingType P = MAX>
inline float PoolPre(const float &x1, const float &x2) {
  return std::max(x1, x2);
}
template <>
inline float PoolPre<AVG>(const float &x1, const float &x2) {
  return x1 + x2;
}
template <PoolingType P = MAX>
inline float PoolPost(const float &x, const float &post) {
  return x;
}
template <>
inline float PoolPost<AVG>(const float &x, const float &post) {
  return x * post;
}
template <PoolingType P>
struct Pooling {
  inline void operator()(const framework::Tensor &input,
                         const std::vector<int> &kernel_size,
                         const std::vector<int> &strides,
                         const std::vector<int> &paddings,
                         framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling2x2 {
  inline void operator()(const framework::Tensor &input,
                         const std::vector<int> &paddings,
                         framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling3x3 {
  inline void operator()(const framework::Tensor &input,
                         const std::vector<int> &paddings,
                         framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling5x5 {
  inline void operator()(const framework::Tensor &input,
                         const std::vector<int> &paddings,
                         framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling7x7 {
  inline void operator()(const framework::Tensor &input,
                         const std::vector<int> &paddings,
                         framework::Tensor *output);
};
}  // namespace math
}  // namespace operators
}  // namespace paddle_mobile
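// Behavioural note on PoolingVal (descriptive): operator+= folds window
// elements into the accumulator and Value() finalizes it, so the window
// {1, 5, 3} yields 5 for PoolingVal<MAX> and 3 for PoolingVal<AVG>
// (9 / count with count == 3), while an empty window yields 0.f in both cases.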
...
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef POOL_OP
#include "operators/math/pooling.h"
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
#include <arm_neon.h>
#endif // __ARM_NEON
namespace paddle_mobile {
namespace operators {
namespace math {
#define POOLING3X3_NORMAL_BORDER(start, end) \
for (int w = start; w < end; ++w) { \
const int w_in_start = -padding_w + w * Stride; \
const int w_in_end = w_in_start + 3; \
const int w_start = w_in_start > 0 ? w_in_start : 0; \
const int w_end = w_in_end < input_w ? w_in_end : input_w; \
PoolingVal<P> val; \
for (int h_in = h_start; h_in < h_end; ++h_in) { \
for (int w_in = w_start; w_in < w_end; ++w_in) { \
val += input[h_in * input_w + w_in]; \
} \
} \
output_ptr[w] = val.Value(); \
}
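// Note on POOLING3X3_NORMAL_BORDER (descriptive): for output columns in
// [start, end) it clips the 3-wide window against [0, input_w), accumulates
// every in-bounds element of the already clipped rows [h_start, h_end) into a
// PoolingVal<P>, and stores Value(), so border averages are divided by the
// actual element count instead of 9.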
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
template <PoolingType P, int Stride = 1>
struct Pooling3x3ValidColLoadInput {
inline void operator()(const float *input, const int input_w,
const int valid_cols, float32x4x2_t &x0, // NOLINT
float32x4x2_t &x1, float32x4x2_t &x2, // NOLINT
float32x4x2_t &y0) { // NOLINT
float fake_input[3][8];
if (valid_cols == 1) {
for (int i = 0; i < 8; ++i, input += input_w) {
fake_input[0][i] = input[0];
}
} else if (valid_cols == 2) {
for (int i = 0; i < 8; ++i, input += input_w) {
fake_input[0][i] = input[0];
fake_input[1][i] = input[1];
}
} else {
for (int i = 0; i < 8; ++i, input += input_w) {
fake_input[0][i] = input[0];
fake_input[1][i] = input[1];
fake_input[2][i] = input[2];
}
}
y0.val[0] = vPoolInitq_f32<P>();
y0.val[1] = vPoolInitq_f32<P>();
for (int i = 0; i < valid_cols; ++i) {
x0.val[0] = vld1q_f32(fake_input[i]);
x0.val[1] = vld1q_f32(fake_input[i] + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x1.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x1.val[1], y0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x2.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x2.val[1], y0.val[1]);
}
}
};
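// Descriptive note: the stride-1 loader above serves the one- or
// two-column-wide left/right borders by gathering 8 consecutive rows of the
// (at most three) valid window columns into a small stack buffer, so the usual
// vext/vPoolPreq reduction can be reused even though the source elements are
// input_w apart in memory.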
template <PoolingType P>
struct Pooling3x3ValidColLoadInput<P, 2> {
inline void operator()(const float *input, const int input_w,
const int valid_cols, float32x4x2_t &x0, // NOLINT
float32x4x2_t &x1, float32x4x2_t &x2, // NOLINT
float32x4x2_t &y0) { // NOLINT
// 13 rows are consumed; pad each buffer row to 16 zero-filled floats so the
// vld2q_f32 of elements 8-15 below never reads past the array.
float fake_input[3][16] = {{0.f}};
if (valid_cols == 1) {
for (int i = 0; i < 13; ++i, input += input_w) {
fake_input[0][i] = input[0];
}
} else if (valid_cols == 2) {
for (int i = 0; i < 13; ++i, input += input_w) {
fake_input[0][i] = input[0];
fake_input[1][i] = input[1];
}
} else {
for (int i = 0; i < 13; ++i, input += input_w) {
fake_input[0][i] = input[0];
fake_input[1][i] = input[1];
fake_input[2][i] = input[2];
}
}
for (int i = 0; i < valid_cols; ++i) {
x0 = vld2q_f32(fake_input[i]);
x1 = vld2q_f32(fake_input[i] + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
}
}
};
template <PoolingType P, int Stride = 1>
struct Pooling3x3NormalRowLoadInput {
inline void operator()(const float *input, float32x4x2_t &x0, // NOLINT
float32x4x2_t &x1, float32x4x2_t &x2, // NOLINT
float32x4x2_t &y0) { // NOLINT
x0.val[0] = vld1q_f32(input);
x0.val[1] = vld1q_f32(input + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x1.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x1.val[1], y0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x2.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x2.val[1], y0.val[1]);
}
};
template <PoolingType P>
struct Pooling3x3NormalRowLoadInput<P, 2> {
inline void operator()(const float *input, float32x4x2_t &x0, // NOLINT
float32x4x2_t &x1, float32x4x2_t &x2, // NOLINT
float32x4x2_t &y0) { // NOLINT
x0 = vld2q_f32(input);
x1 = vld2q_f32(input + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
}
};
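// Descriptive note: the stride-2 row loader above relies on vld2q_f32 to
// de-interleave even and odd columns, so x0.val[0] holds the window start
// columns, x0.val[1] the middle columns, and a single vextq_f32 provides the
// third column; one pass combines enough columns for eight consecutive
// stride-2 outputs, of which the caller stores six per tile.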
#endif // __ARM_NEON__
template <PoolingType P, int Stride>
inline void Pooling3x3ValidCol(const float *input, const int h_output,
const int h_output_end, const int w_output,
const int input_h, const int input_w,
const int padding_h, const int padding_w,
const int output_w, float *output) {
const int w_in_start = -padding_w + w_output * Stride;
const int w_in_end = w_in_start + 3;
const int w_start = w_in_start > 0 ? w_in_start : 0;
const int w_end = w_in_end < input_w ? w_in_end : input_w;
int remain_start = h_output;
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
int output_tiles = (h_output_end - h_output) / 6;
remain_start = h_output + output_tiles * 6;
int input_h_start = h_output * Stride - padding_h;
size_t input_offset = input_h_start * input_w + w_start;
size_t output_offset = h_output * output_w + w_output;
int valid_cols = w_end - w_start;
Pooling3x3ValidColLoadInput<P, Stride> PoolingCompute;
float32x4x2_t x0, x1, x2, y0;
float32x4_t avg = vdupq_n_f32(1.f / (3 * valid_cols));
for (int h = 0; h < output_tiles * 6; h += 6) {
float *output0 = output + output_offset;
float *output1 = output0 + output_w;
float *output2 = output1 + output_w;
float *output3 = output2 + output_w;
float *output4 = output3 + output_w;
float *output5 = output4 + output_w;
y0.val[0] = vPoolInitq_f32<P>();
y0.val[1] = vPoolInitq_f32<P>();
PoolingCompute(input + input_offset, input_w, valid_cols, x0, x1, x2, y0);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], avg);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], avg);
vst1q_lane_f32(output0, y0.val[0], 0);
vst1q_lane_f32(output1, y0.val[0], 1);
vst1q_lane_f32(output2, y0.val[0], 2);
vst1q_lane_f32(output3, y0.val[0], 3);
vst1q_lane_f32(output4, y0.val[1], 0);
vst1q_lane_f32(output5, y0.val[1], 1);
input_offset += 6 * Stride * input_w;
output_offset += 6 * output_w;
}
#endif
for (int h = remain_start; h < h_output_end; ++h) {
PoolingVal<P> val;
const int h_in_start = -padding_h + h * Stride;
for (int i = 0; i < 3; ++i) {
for (int w_in = w_start; w_in < w_end; ++w_in) {
val += input[(h_in_start + i) * input_w + w_in];
}
}
output[h * output_w + w_output] = val.Value();
}
}
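// Descriptive note: Pooling3x3ValidCol walks one border column and produces
// six consecutive output rows per NEON iteration (lanes 0-3 of y0.val[0] plus
// lanes 0-1 of y0.val[1]), then falls back to the scalar PoolingVal loop for
// any remaining rows.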
template <PoolingType P, int Stride>
inline void Pooling3x3NormalRow(const float *input, const int h_output,
const int input_h, const int input_w,
const int padding_h, const int padding_w,
const int output_w, float *output) {
const int h_in_start = -padding_h + h_output * Stride;
const int h_in_end = h_in_start + 3;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int h_end = h_in_end < input_h ? h_in_end : input_h;
int valid_w_start = (padding_w + Stride - 1) / Stride;
int valid_w_end = (input_w - 3) / Stride + 1 + valid_w_start;
float *output_ptr = output + h_output * output_w;
// border left
POOLING3X3_NORMAL_BORDER(0, valid_w_start)
// middle
int remain_start = valid_w_start;
#if defined(__ARM_NEON) || defined(__ARM_NEON__)
int output_tiles = (valid_w_end - valid_w_start) / 6;
remain_start = valid_w_start + output_tiles * 6;
Pooling3x3NormalRowLoadInput<P, Stride> PoolingCompute;
float32x4x2_t x0, x1, x2, y0;
float32x4_t post = vdupq_n_f32(1.f / (3 * (h_end - h_start)));
for (int w = 0; w < output_tiles * 6; w += 6) {
int output_offset = valid_w_start + w;
int input_w_offset = output_offset * Stride - padding_w;
y0.val[0] = vPoolInitq_f32<P>();
y0.val[1] = vPoolInitq_f32<P>();
for (int h_in = h_start; h_in < h_end; ++h_in) {
PoolingCompute(input + h_in * input_w + input_w_offset, x0, x1, x2, y0);
}
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr + output_offset, y0.val[0]);
vst1_f32(output_ptr + output_offset + 4, vget_low_f32(y0.val[1]));
}
#endif // __ARM_NEON__
for (int w = remain_start; w < valid_w_end; ++w) {
PoolingVal<P> val;
int input_start = -padding_w + w * Stride;
for (int h_in = h_start; h_in < h_end; ++h_in) {
for (int j = 0; j < 3; ++j) {
val += input[h_in * input_w + j + input_start];
}
}
output_ptr[w] = val.Value();
}
// border right
POOLING3X3_NORMAL_BORDER(valid_w_end, output_w)
}
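// Descriptive note: Pooling3x3NormalRow handles the top and bottom output rows
// whose 3-row window is clipped by padding_h; columns in
// [valid_w_start, valid_w_end) have a fully in-bounds window and are
// vectorized six at a time, while both horizontal borders go through
// POOLING3X3_NORMAL_BORDER.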
template <PoolingType P>
struct Pooling3x3<P, 1> {
inline void operator()(const framework::Tensor &input,
const std::vector<int> &paddings,
framework::Tensor *output) {
const float *input_data = input.data<float>();
float *output_data = output->mutable_data<float>();
int input_h = input.dims()[2];
int input_w = input.dims()[3];
int output_h = output->dims()[2];
int output_w = output->dims()[3];
int padding_h = paddings[0];
int padding_w = paddings[1];
int image_size = input_h * input_w;
int out_image_size = output_h * output_w;
int valid_h_start = padding_h;
int valid_h = input_h - 2;
int valid_h_end = valid_h_start + valid_h;
int valid_w_start = padding_w;
int valid_w = input_w - 2;
int valid_w_end = valid_w_start + valid_w;
float avg = 1.f / 9;
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < output->dims()[0]; ++batch) {
for (int c = 0; c < output->dims()[1]; ++c) {
int channel = batch * output->dims()[1] + c;
const float *input_ptr = input_data + channel * image_size;
float *output_ptr = output_data + channel * out_image_size;
// top
for (int h = 0; h < valid_h_start; ++h) {
Pooling3x3NormalRow<P, 1>(input_ptr, h, input_h, input_w, padding_h,
padding_w, output_w, output_ptr);
}
// left
for (int w = 0; w < valid_w_start; ++w) {
Pooling3x3ValidCol<P, 1>(input_ptr, valid_h_start, valid_h_end, w,
input_h, input_w, padding_h, padding_w,
output_w, output_ptr);
}
// right
for (int w = valid_w_end; w < output_w; ++w) {
Pooling3x3ValidCol<P, 1>(input_ptr, valid_h_start, valid_h_end, w,
input_h, input_w, padding_h, padding_w,
output_w, output_ptr);
}
// bottom
for (int h = valid_h_end; h < output_h; ++h) {
Pooling3x3NormalRow<P, 1>(input_ptr, h, input_h, input_w, padding_h,
padding_w, output_w, output_ptr);
}
// valid
int output_w_tiles = valid_w / 6;
int output_w_remain = valid_w - output_w_tiles * 6;
for (int h = valid_h_start; h < valid_h_end - 3; h += 4) {
const float *input_ptr0 = input_ptr + (h - padding_h) * input_w;
const float *input_ptr1 = input_ptr0 + input_w;
const float *input_ptr2 = input_ptr1 + input_w;
const float *input_ptr3 = input_ptr2 + input_w;
const float *input_ptr4 = input_ptr3 + input_w;
const float *input_ptr5 = input_ptr4 + input_w;
float *output_ptr0 = output_ptr + h * output_w + valid_w_start;
float *output_ptr1 = output_ptr0 + output_w;
float *output_ptr2 = output_ptr1 + output_w;
float *output_ptr3 = output_ptr2 + output_w;
int remain = output_w_remain;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
float32x4x2_t x0, x1, x2;
float32x4x2_t y0, y1, y2;
float32x4_t post = vdupq_n_f32(1.f / 9);
for (int loop = 0; loop < output_w_tiles; ++loop) {
x0.val[0] = vld1q_f32(input_ptr0);
x0.val[1] = vld1q_f32(input_ptr0 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
x0.val[0] = vld1q_f32(input_ptr1);
x0.val[1] = vld1q_f32(input_ptr1 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
y1.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y1.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(y1.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(y1.val[1], y0.val[1]);
x0.val[0] = vld1q_f32(input_ptr2);
x0.val[1] = vld1q_f32(input_ptr2 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
y2.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y2.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y1.val[0] = vPoolPreq_f32<P>(y2.val[0], y1.val[0]);
y1.val[1] = vPoolPreq_f32<P>(y2.val[1], y1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(y2.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(y2.val[1], y0.val[1]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr0, y0.val[0]);
vst1_f32(output_ptr0 + 4, vget_low_f32(y0.val[1]));
x0.val[0] = vld1q_f32(input_ptr3);
x0.val[1] = vld1q_f32(input_ptr3 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y1.val[0] = vPoolPreq_f32<P>(y0.val[0], y1.val[0]);
y1.val[1] = vPoolPreq_f32<P>(y0.val[1], y1.val[1]);
y2.val[0] = vPoolPreq_f32<P>(y0.val[0], y2.val[0]);
y2.val[1] = vPoolPreq_f32<P>(y0.val[1], y2.val[1]);
y1.val[0] = vPoolPostq_f32<P>(y1.val[0], post);
y1.val[1] = vPoolPostq_f32<P>(y1.val[1], post);
vst1q_f32(output_ptr1, y1.val[0]);
vst1_f32(output_ptr1 + 4, vget_low_f32(y1.val[1]));
x0.val[0] = vld1q_f32(input_ptr4);
x0.val[1] = vld1q_f32(input_ptr4 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y2.val[0] = vPoolPreq_f32<P>(x0.val[0], y2.val[0]);
y2.val[1] = vPoolPreq_f32<P>(x0.val[1], y2.val[1]);
y2.val[0] = vPoolPostq_f32<P>(y2.val[0], post);
y2.val[1] = vPoolPostq_f32<P>(y2.val[1], post);
vst1q_f32(output_ptr2, y2.val[0]);
vst1_f32(output_ptr2 + 4, vget_low_f32(y2.val[1]));
x0.val[0] = vld1q_f32(input_ptr5);
x0.val[1] = vld1q_f32(input_ptr5 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr3, y0.val[0]);
vst1_f32(output_ptr3 + 4, vget_low_f32(y0.val[1]));
input_ptr0 += 6;
input_ptr1 += 6;
input_ptr2 += 6;
input_ptr3 += 6;
input_ptr4 += 6;
input_ptr5 += 6;
output_ptr0 += 6;
output_ptr1 += 6;
output_ptr2 += 6;
output_ptr3 += 6;
}
// remain width
if (remain >= 4) {
x0.val[0] = vld1q_f32(input_ptr0);
x0.val[1] = vld1q_f32(input_ptr0 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[0] = vld1q_f32(input_ptr1);
x0.val[1] = vld1q_f32(input_ptr1 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
y1.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(y1.val[0], y0.val[0]);
x0.val[0] = vld1q_f32(input_ptr2);
x0.val[1] = vld1q_f32(input_ptr2 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
y2.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y1.val[0] = vPoolPreq_f32<P>(y2.val[0], y1.val[0]);
y0.val[0] = vPoolPreq_f32<P>(y2.val[0], y0.val[0]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
vst1q_f32(output_ptr0, y0.val[0]);
x0.val[0] = vld1q_f32(input_ptr3);
x0.val[1] = vld1q_f32(input_ptr3 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y1.val[0] = vPoolPreq_f32<P>(y0.val[0], y1.val[0]);
y2.val[0] = vPoolPreq_f32<P>(y0.val[0], y2.val[0]);
y1.val[0] = vPoolPostq_f32<P>(y1.val[0], post);
vst1q_f32(output_ptr1, y1.val[0]);
x0.val[0] = vld1q_f32(input_ptr4);
x0.val[1] = vld1q_f32(input_ptr4 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y2.val[0] = vPoolPreq_f32<P>(x0.val[0], y2.val[0]);
y2.val[0] = vPoolPostq_f32<P>(y2.val[0], post);
vst1q_f32(output_ptr2, y2.val[0]);
x0.val[0] = vld1q_f32(input_ptr5);
x0.val[1] = vld1q_f32(input_ptr5 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
vst1q_f32(output_ptr3, y0.val[0]);
input_ptr0 += 4;
input_ptr1 += 4;
input_ptr2 += 4;
input_ptr3 += 4;
input_ptr4 += 4;
input_ptr5 += 4;
output_ptr0 += 4;
output_ptr1 += 4;
output_ptr2 += 4;
output_ptr3 += 4;
remain -= 4;
}
#endif // __ARM_NEON__
for (int r = 0; r < remain; ++r) {
float m0 = PoolPre<P>(input_ptr0[r], input_ptr0[r + 1]);
m0 = PoolPre<P>(m0, input_ptr0[r + 2]);
float m1 = PoolPre<P>(input_ptr1[r], input_ptr1[r + 1]);
m1 = PoolPre<P>(m1, input_ptr1[r + 2]);
float m2 = PoolPre<P>(input_ptr2[r], input_ptr2[r + 1]);
m2 = PoolPre<P>(m2, input_ptr2[r + 2]);
float m3 = PoolPre<P>(input_ptr3[r], input_ptr3[r + 1]);
m3 = PoolPre<P>(m3, input_ptr3[r + 2]);
float m4 = PoolPre<P>(input_ptr4[r], input_ptr4[r + 1]);
m4 = PoolPre<P>(m4, input_ptr4[r + 2]);
float m5 = PoolPre<P>(input_ptr5[r], input_ptr5[r + 1]);
m5 = PoolPre<P>(m5, input_ptr5[r + 2]);
m0 = PoolPre<P>(PoolPre<P>(m0, m1), m2);
m1 = PoolPre<P>(PoolPre<P>(m1, m2), m3);
m2 = PoolPre<P>(PoolPre<P>(m2, m3), m4);
m3 = PoolPre<P>(PoolPre<P>(m3, m4), m5);
output_ptr0[r] = PoolPost<P>(m0, avg);
output_ptr1[r] = PoolPost<P>(m1, avg);
output_ptr2[r] = PoolPost<P>(m2, avg);
output_ptr3[r] = PoolPost<P>(m3, avg);
}
}
// remain height
int start_h = valid_h_start + (valid_h & 0xFFFC);
for (int h = start_h; h < valid_h_end; ++h) {
const float *input_ptr0 = input_ptr + (h - padding_h) * input_w;
const float *input_ptr1 = input_ptr0 + input_w;
const float *input_ptr2 = input_ptr1 + input_w;
float *output_ptr0 = output_ptr + h * output_w + valid_w_start;
int remain = output_w_remain;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
float32x4x2_t x0, x1, x2, y0;
float32x4_t post = vdupq_n_f32(1.f / 9);
for (int loop = 0; loop < output_w_tiles; ++loop) {
x0.val[0] = vld1q_f32(input_ptr0);
x0.val[1] = vld1q_f32(input_ptr0 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
x0.val[0] = vld1q_f32(input_ptr1);
x0.val[1] = vld1q_f32(input_ptr1 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
x0.val[0] = vld1q_f32(input_ptr2);
x0.val[1] = vld1q_f32(input_ptr2 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x1.val[1] = vextq_f32(x0.val[1], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x2.val[1] = vextq_f32(x0.val[1], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr0, y0.val[0]);
vst1_f32(output_ptr0 + 4, vget_low_f32(y0.val[1]));
input_ptr0 += 6;
input_ptr1 += 6;
input_ptr2 += 6;
output_ptr0 += 6;
}
// remain width
if (remain >= 4) {
x0.val[0] = vld1q_f32(input_ptr0);
x0.val[1] = vld1q_f32(input_ptr0 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[0] = vld1q_f32(input_ptr1);
x0.val[1] = vld1q_f32(input_ptr1 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
x0.val[0] = vld1q_f32(input_ptr2);
x0.val[1] = vld1q_f32(input_ptr2 + 4);
x1.val[0] = vextq_f32(x0.val[0], x0.val[1], 1);
x2.val[0] = vextq_f32(x0.val[0], x0.val[1], 2);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x1.val[0]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
vst1q_f32(output_ptr0, y0.val[0]);
input_ptr0 += 4;
input_ptr1 += 4;
input_ptr2 += 4;
output_ptr0 += 4;
remain -= 4;
}
#endif // __ARM_NEON__
for (int r = 0; r < remain; ++r) {
float m0 = PoolPre<P>(input_ptr0[r], input_ptr0[r + 1]);
m0 = PoolPre<P>(m0, input_ptr0[r + 2]);
float m1 = PoolPre<P>(input_ptr1[r], input_ptr1[r + 1]);
m1 = PoolPre<P>(m1, input_ptr1[r + 2]);
float m2 = PoolPre<P>(input_ptr2[r], input_ptr2[r + 1]);
m2 = PoolPre<P>(m2, input_ptr2[r + 2]);
m0 = PoolPre<P>(PoolPre<P>(m0, m1), m2);
output_ptr0[r] = PoolPost<P>(m0, avg);
}
}
}
}
}
};
template <PoolingType P>
struct Pooling3x3<P, 2> {
inline void operator()(const framework::Tensor &input,
const std::vector<int> &paddings,
framework::Tensor *output) {
const float *input_data = input.data<float>();
float *output_data = output->mutable_data<float>();
int input_h = input.dims()[2];
int input_w = input.dims()[3];
int output_h = output->dims()[2];
int output_w = output->dims()[3];
int padding_h = paddings[0];
int padding_w = paddings[1];
int image_size = input_h * input_w;
int out_image_size = output_h * output_w;
int valid_h_start = (padding_h + 1) / 2;
int valid_h = (input_h - 3) / 2 + 1;
int valid_h_end = valid_h_start + valid_h;
int valid_w_start = (padding_w + 1) / 2;
int valid_w = (input_w - 3) / 2 + 1;
int valid_w_end = valid_w_start + valid_w;
float avg = 1.f / 9;
#pragma omp parallel for collapse(2)
for (int batch = 0; batch < output->dims()[0]; ++batch) {
for (int c = 0; c < output->dims()[1]; ++c) {
int channel = batch * output->dims()[1] + c;
const float *input_ptr = input_data + channel * image_size;
float *output_ptr = output_data + channel * out_image_size;
// top
for (int h = 0; h < valid_h_start; ++h) {
Pooling3x3NormalRow<P, 2>(input_ptr, h, input_h, input_w, padding_h,
padding_w, output_w, output_ptr);
}
// left
for (int w = 0; w < valid_w_start; ++w) {
Pooling3x3ValidCol<P, 2>(input_ptr, valid_h_start, valid_h_end, w,
input_h, input_w, padding_h, padding_w,
output_w, output_ptr);
}
// right
for (int w = valid_w_end; w < output_w; ++w) {
Pooling3x3ValidCol<P, 2>(input_ptr, valid_h_start, valid_h_end, w,
input_h, input_w, padding_h, padding_w,
output_w, output_ptr);
}
// bottom
for (int h = valid_h_end; h < output_h; ++h) {
Pooling3x3NormalRow<P, 2>(input_ptr, h, input_h, input_w, padding_h,
padding_w, output_w, output_ptr);
}
// valid
int input_w_start = 2 * valid_w_start - padding_w;
int output_w_tiles = valid_w / 6;
int output_w_remain = valid_w - output_w_tiles * 6;
for (int h = valid_h_start; h < valid_h_end - 2; h += 3) {
size_t offset = (2 * h - padding_h) * input_w + input_w_start;
const float *input_ptr0 = input_ptr + offset;
const float *input_ptr1 = input_ptr0 + input_w;
const float *input_ptr2 = input_ptr1 + input_w;
const float *input_ptr3 = input_ptr2 + input_w;
const float *input_ptr4 = input_ptr3 + input_w;
const float *input_ptr5 = input_ptr4 + input_w;
const float *input_ptr6 = input_ptr5 + input_w;
float *output_ptr0 = output_ptr + h * output_w + valid_w_start;
float *output_ptr1 = output_ptr0 + output_w;
float *output_ptr2 = output_ptr1 + output_w;
int remain = output_w_remain;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
float32x4x2_t x0, x1, x2;
float32x4x2_t y0, y1, y2;
float32x4_t post = vdupq_n_f32(1.f / 9);
for (int loop = 0; loop < output_w_tiles; ++loop) {
x0 = vld2q_f32(input_ptr0);
x1 = vld2q_f32(input_ptr0 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
x0 = vld2q_f32(input_ptr1);
x1 = vld2q_f32(input_ptr1 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
x0 = vld2q_f32(input_ptr2);
x1 = vld2q_f32(input_ptr2 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
y1.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y1.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(y1.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(y1.val[1], y0.val[1]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr0, y0.val[0]);
vst1_f32(output_ptr0 + 4, vget_low_f32(y0.val[1]));
x0 = vld2q_f32(input_ptr3);
x1 = vld2q_f32(input_ptr3 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y1.val[0] = vPoolPreq_f32<P>(x0.val[0], y1.val[0]);
y1.val[1] = vPoolPreq_f32<P>(x0.val[1], y1.val[1]);
x0 = vld2q_f32(input_ptr4);
x1 = vld2q_f32(input_ptr4 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y1.val[0] = vPoolPreq_f32<P>(y0.val[0], y1.val[0]);
y1.val[1] = vPoolPreq_f32<P>(y0.val[1], y1.val[1]);
y1.val[0] = vPoolPostq_f32<P>(y1.val[0], post);
y1.val[1] = vPoolPostq_f32<P>(y1.val[1], post);
vst1q_f32(output_ptr1, y1.val[0]);
vst1_f32(output_ptr1 + 4, vget_low_f32(y1.val[1]));
x0 = vld2q_f32(input_ptr5);
x1 = vld2q_f32(input_ptr5 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
x0 = vld2q_f32(input_ptr6);
x1 = vld2q_f32(input_ptr6 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr2, y0.val[0]);
vst1_f32(output_ptr2 + 4, vget_low_f32(y0.val[1]));
input_ptr0 += 12;
input_ptr1 += 12;
input_ptr2 += 12;
input_ptr3 += 12;
input_ptr4 += 12;
input_ptr5 += 12;
input_ptr6 += 12;
output_ptr0 += 6;
output_ptr1 += 6;
output_ptr2 += 6;
}
// remain width
if (remain >= 4) {
x0 = vld2q_f32(input_ptr0);
x1.val[0] = vdupq_n_f32(input_ptr0[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0 = vld2q_f32(input_ptr1);
x1.val[0] = vdupq_n_f32(input_ptr1[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
x0 = vld2q_f32(input_ptr2);
x1.val[0] = vdupq_n_f32(input_ptr2[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
y1.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(y1.val[0], y0.val[0]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
vst1q_f32(output_ptr0, y0.val[0]);
x0 = vld2q_f32(input_ptr3);
x1.val[0] = vdupq_n_f32(input_ptr3[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y1.val[0] = vPoolPreq_f32<P>(x0.val[0], y1.val[0]);
x0 = vld2q_f32(input_ptr4);
x1.val[0] = vdupq_n_f32(input_ptr4[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y1.val[0] = vPoolPreq_f32<P>(y0.val[0], y1.val[0]);
y1.val[0] = vPoolPostq_f32<P>(y1.val[0], post);
vst1q_f32(output_ptr1, y1.val[0]);
x0 = vld2q_f32(input_ptr5);
x1.val[0] = vdupq_n_f32(input_ptr5[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
x0 = vld2q_f32(input_ptr6);
x1.val[0] = vdupq_n_f32(input_ptr6[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
vst1q_f32(output_ptr2, y0.val[0]);
input_ptr0 += 8;
input_ptr1 += 8;
input_ptr2 += 8;
input_ptr3 += 8;
input_ptr4 += 8;
input_ptr5 += 8;
input_ptr6 += 8;
output_ptr0 += 4;
output_ptr1 += 4;
output_ptr2 += 4;
remain -= 4;
}
#endif // __ARM_NEON__
for (int r = 0; r < remain; ++r) {
float m0 = PoolPre<P>(input_ptr0[2 * r], input_ptr0[2 * r + 1]);
m0 = PoolPre<P>(m0, input_ptr0[2 * r + 2]);
float m1 = PoolPre<P>(input_ptr1[2 * r], input_ptr1[2 * r + 1]);
m1 = PoolPre<P>(m1, input_ptr1[2 * r + 2]);
float m2 = PoolPre<P>(input_ptr2[2 * r], input_ptr2[2 * r + 1]);
m2 = PoolPre<P>(m2, input_ptr2[2 * r + 2]);
float m3 = PoolPre<P>(input_ptr3[2 * r], input_ptr3[2 * r + 1]);
m3 = PoolPre<P>(m3, input_ptr3[2 * r + 2]);
float m4 = PoolPre<P>(input_ptr4[2 * r], input_ptr4[2 * r + 1]);
m4 = PoolPre<P>(m4, input_ptr4[2 * r + 2]);
float m5 = PoolPre<P>(input_ptr5[2 * r], input_ptr5[2 * r + 1]);
m5 = PoolPre<P>(m5, input_ptr5[2 * r + 2]);
float m6 = PoolPre<P>(input_ptr6[2 * r], input_ptr6[2 * r + 1]);
m6 = PoolPre<P>(m6, input_ptr6[2 * r + 2]);
m0 = PoolPre<P>(PoolPre<P>(m0, m1), m2);
m1 = PoolPre<P>(PoolPre<P>(m2, m3), m4);
m2 = PoolPre<P>(PoolPre<P>(m4, m5), m6);
output_ptr0[r] = PoolPost<P>(m0, avg);
output_ptr1[r] = PoolPost<P>(m1, avg);
output_ptr2[r] = PoolPost<P>(m2, avg);
}
}
// remain height
int start_h = valid_h_start + valid_h / 3 * 3;
for (int h = start_h; h < valid_h_end; ++h) {
size_t offset = (2 * h - padding_h) * input_w + input_w_start;
const float *input_ptr0 = input_ptr + offset;
const float *input_ptr1 = input_ptr0 + input_w;
const float *input_ptr2 = input_ptr1 + input_w;
float *output_ptr0 = output_ptr + h * output_w + valid_w_start;
int remain = output_w_remain;
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
float32x4x2_t x0, x1, x2, y0;
float32x4_t post = vdupq_n_f32(1.f / 9);
for (int loop = 0; loop < output_w_tiles; ++loop) {
x0 = vld2q_f32(input_ptr0);
x1 = vld2q_f32(input_ptr0 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
x0 = vld2q_f32(input_ptr1);
x1 = vld2q_f32(input_ptr1 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
x0 = vld2q_f32(input_ptr2);
x1 = vld2q_f32(input_ptr2 + 8);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x2.val[1] = vextq_f32(x1.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[1] = vPoolPreq_f32<P>(x1.val[0], x1.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0.val[1] = vPoolPreq_f32<P>(x0.val[1], x2.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[1] = vPoolPreq_f32<P>(x0.val[1], y0.val[1]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
y0.val[1] = vPoolPostq_f32<P>(y0.val[1], post);
vst1q_f32(output_ptr0, y0.val[0]);
vst1_f32(output_ptr0 + 4, vget_low_f32(y0.val[1]));
input_ptr0 += 12;
input_ptr1 += 12;
input_ptr2 += 12;
output_ptr0 += 6;
}
// remain width
if (remain >= 4) {
x0 = vld2q_f32(input_ptr0);
x1.val[0] = vdupq_n_f32(input_ptr0[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
x0 = vld2q_f32(input_ptr1);
x1.val[0] = vdupq_n_f32(input_ptr1[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
x0 = vld2q_f32(input_ptr2);
x1.val[0] = vdupq_n_f32(input_ptr2[8]);
x2.val[0] = vextq_f32(x0.val[0], x1.val[0], 1);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x0.val[1]);
x0.val[0] = vPoolPreq_f32<P>(x0.val[0], x2.val[0]);
y0.val[0] = vPoolPreq_f32<P>(x0.val[0], y0.val[0]);
y0.val[0] = vPoolPostq_f32<P>(y0.val[0], post);
vst1q_f32(output_ptr0, y0.val[0]);
input_ptr0 += 8;
input_ptr1 += 8;
input_ptr2 += 8;
output_ptr0 += 4;
remain -= 4;
}
#endif // __ARM_NEON__
for (int r = 0; r < remain; ++r) {
float m0 = PoolPre<P>(input_ptr0[2 * r], input_ptr0[2 * r + 1]);
m0 = PoolPre<P>(m0, input_ptr0[2 * r + 2]);
float m1 = PoolPre<P>(input_ptr1[2 * r], input_ptr1[2 * r + 1]);
m1 = PoolPre<P>(m1, input_ptr1[2 * r + 2]);
float m2 = PoolPre<P>(input_ptr2[2 * r], input_ptr2[2 * r + 1]);
m2 = PoolPre<P>(m2, input_ptr2[2 * r + 2]);
m0 = PoolPre<P>(PoolPre<P>(m0, m1), m2);
output_ptr0[r] = PoolPost<P>(m0, avg);
}
}
}
}
}
};
template struct Pooling3x3<MAX, 1>;
template struct Pooling3x3<AVG, 1>;
template struct Pooling3x3<MAX, 2>;
template struct Pooling3x3<AVG, 2>;
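// Illustrative only (not part of the original file): a scalar reference for a
// single channel of the AVG/stride-2 path above. It assumes zero padding with
// a fixed 1/9 divisor, which matches the interior path; the border helpers
// (Pooling3x3NormalRow/Pooling3x3ValidCol) are defined elsewhere and may
// handle edges differently.
inline void AvgPooling3x3Stride2Reference(const float *input, int input_h,
                                          int input_w, int padding_h,
                                          int padding_w, float *output,
                                          int output_h, int output_w) {
  for (int oh = 0; oh < output_h; ++oh) {
    for (int ow = 0; ow < output_w; ++ow) {
      float sum = 0.f;
      for (int kh = 0; kh < 3; ++kh) {
        for (int kw = 0; kw < 3; ++kw) {
          int ih = oh * 2 - padding_h + kh;
          int iw = ow * 2 - padding_w + kw;
          if (ih >= 0 && ih < input_h && iw >= 0 && iw < input_w) {
            sum += input[ih * input_w + iw];
          }
        }
      }
      output[oh * output_w + ow] = sum / 9.f;
    }
  }
}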
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif // POOL_OP
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef QUANT_OP
#pragma once
#include <cmath>
#include "common/types.h"
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#include <arm_neon.h>
#endif
namespace paddle_mobile {
namespace operators {
namespace math {
template <RoundType R = ROUND_NEAREST_TOWARDS_ZERO>
inline int8_t Round(const float &x) {
return static_cast<int8_t>(x);
}
template <>
inline int8_t Round<ROUND_NEAREST_AWAY_ZERO>(const float &x) {
return std::round(x);
}
template <>
inline int8_t Round<ROUND_NEAREST_TO_EVEN>(const float &x) {
float v = std::round(x);
int32_t q = static_cast<int32_t>(v);
  if (std::abs(std::abs(q - x) - 0.5) <= 0) {  // x is exactly halfway between two integers
if (std::abs(q) % 2 != 0) {
q = q + ((q > 0) ? -1 : 1);
}
}
return static_cast<int8_t>(q);
}
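// Illustration of the three rounding modes on a few sample values (assuming
// the round-half-to-even tie handling above):
//   Round<ROUND_NEAREST_TOWARDS_ZERO>(2.7f)  -> 2    (truncate toward zero)
//   Round<ROUND_NEAREST_AWAY_ZERO>(2.5f)     -> 3
//   Round<ROUND_NEAREST_AWAY_ZERO>(-2.5f)    -> -3
//   Round<ROUND_NEAREST_TO_EVEN>(2.5f)       -> 2    (tie resolved to even)
//   Round<ROUND_NEAREST_TO_EVEN>(3.5f)       -> 4
//   Round<ROUND_NEAREST_TO_EVEN>(-2.5f)      -> -2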
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
template <RoundType R = ROUND_NEAREST_TOWARDS_ZERO>
inline int32x4_t vRoundq_f32(const float32x4_t &x) {
return vcvtq_s32_f32(x);
}
template <>
inline int32x4_t vRoundq_f32<ROUND_NEAREST_AWAY_ZERO>(const float32x4_t &x) {
float32x4_t plus = vdupq_n_f32(0.5);
float32x4_t minus = vdupq_n_f32(-0.5);
float32x4_t zero = vdupq_n_f32(0);
uint32x4_t more_than_zero = vcgtq_f32(x, zero);
float32x4_t temp = vbslq_f32(more_than_zero, plus, minus);
temp = vaddq_f32(x, temp);
int32x4_t ret = vcvtq_s32_f32(temp);
return ret;
}
template <>
inline int32x4_t vRoundq_f32<ROUND_NEAREST_TO_EVEN>(const float32x4_t &x) {
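  // Strategy: round half away from zero first, then find lanes where the
  // input was exactly halfway (|round(x) - x| == 0.5) and the rounded value
  // is odd, and move those lanes one step back toward zero so that ties end
  // up on the nearest even integer, mirroring the scalar version above.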
float32x4_t point5 = vdupq_n_f32(0.5);
int32x4_t one = vdupq_n_s32(1);
int32x4_t zero = vdupq_n_s32(0);
int32x4_t rnd = math::vRoundq_f32<ROUND_NEAREST_AWAY_ZERO>(x);
float32x4_t frnd = vcvtq_f32_s32(rnd);
frnd = vsubq_f32(frnd, x);
frnd = vabsq_f32(frnd);
uint32x4_t equal_point5 = vceqq_f32(frnd, point5);
int32x4_t abs_rnd = vabsq_s32(rnd);
abs_rnd = vandq_s32(abs_rnd, one);
uint32x4_t not_mod2 = vreinterpretq_u32_s32(abs_rnd);
uint32x4_t mask = vandq_u32(equal_point5, not_mod2);
uint32x4_t more_than_zero = vcgtq_s32(rnd, zero);
more_than_zero = vandq_u32(more_than_zero, vreinterpretq_u32_s32(one));
mask = veorq_u32(more_than_zero, mask);
more_than_zero = veorq_u32(more_than_zero, vreinterpretq_u32_s32(one));
mask = vaddq_u32(more_than_zero, mask);
int32x4_t smask = vreinterpretq_s32_u32(mask);
smask = vsubq_s32(smask, one);
rnd = vaddq_s32(rnd, smask);
return rnd;
}
#endif // __ARM_NEON__
} // namespace math
} // namespace operators
} // namespace paddle_mobile
#endif // QUANT_OP
...@@ -1632,10 +1632,6 @@ class FusionFcParam : public OpParam { ...@@ -1632,10 +1632,6 @@ class FusionFcParam : public OpParam {
x_num_col_dims_ = GetAttr<int>("x_num_col_dims", attrs); x_num_col_dims_ = GetAttr<int>("x_num_col_dims", attrs);
y_num_col_dims_ = GetAttr<int>("y_num_col_dims", attrs); y_num_col_dims_ = GetAttr<int>("y_num_col_dims", attrs);
axis_ = GetAttr<int>("axis", attrs); axis_ = GetAttr<int>("axis", attrs);
#ifdef FUSION_FC_INT8_OP
scale_ = InputScaleFrom<GType>(inputs, scope);
#endif
} }
GType *InputX() const { return input_x_; } GType *InputX() const { return input_x_; }
...@@ -1660,16 +1656,8 @@ class FusionFcParam : public OpParam { ...@@ -1660,16 +1656,8 @@ class FusionFcParam : public OpParam {
int y_num_col_dims_; int y_num_col_dims_;
int axis_; int axis_;
#ifdef FUSION_FC_INT8_OP
public:
const RType *InputScale() const { return scale_; }
private:
RType *scale_;
#endif
#ifdef PADDLE_MOBILE_FPGA #ifdef PADDLE_MOBILE_FPGA
private: private: // NOLINT
fpga::SplitConvArgs fpga_conv_args; fpga::SplitConvArgs fpga_conv_args;
public: public:
...@@ -1719,19 +1707,7 @@ class FusionConvAddReluParam : public FusionConvAddParam<DeviceType> { ...@@ -1719,19 +1707,7 @@ class FusionConvAddReluParam : public FusionConvAddParam<DeviceType> {
FusionConvAddReluParam(const VariableNameMap &inputs, FusionConvAddReluParam(const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const AttributeMap &attrs, const Scope &scope) const AttributeMap &attrs, const Scope &scope)
: FusionConvAddParam<DeviceType>(inputs, outputs, attrs, scope) { : FusionConvAddParam<DeviceType>(inputs, outputs, attrs, scope) {}
#ifdef FUSION_CONVADDRELU_INT8_OP
scale_ = OpParam::InputScaleFrom<GType>(inputs, scope);
#endif
}
#ifdef FUSION_CONVADDRELU_INT8_OP
typedef typename DtypeTensorTrait<DeviceType>::gtype GType;
typedef typename DtypeTensorTrait<DeviceType>::rtype RType;
const RType *InputScale() const { return scale_; }
private:
RType *scale_;
#endif
}; };
#endif #endif
...@@ -2554,18 +2530,13 @@ class QuantizeParam : public OpParam { ...@@ -2554,18 +2530,13 @@ class QuantizeParam : public OpParam {
// scale = max(abs(x)) // scale = max(abs(x))
online_scale_ = OpParam::GetVarValue<GType>("OutScale", outputs, scope); online_scale_ = OpParam::GetVarValue<GType>("OutScale", outputs, scope);
// offline // offline
if (HasAttr("static_scale", attrs)) { if (inputs.count("InScale")) {
is_static_ = true; offline_ = true;
static_scale_ = GetAttr<float>("static_scale", attrs); offline_scale_ = OpParam::GetVarValue<GType>("InScale", inputs, scope);
} }
// x = round(scale * x) // x = round(scale * x)
if (HasAttr("round_type", attrs)) { if (OpParam::HasAttr("round_type", attrs)) {
round_type_ = GetAttr<RoundType>("round_type", attrs); round_type_ = OpParam::GetAttr<RoundType>("round_type", attrs);
}
// get paddings
paddings_ = std::vector<int>({0, 0});
if (HasAttr("paddings", attrs)) {
paddings_ = GetAttr<vector<int>>("paddings", attrs);
} }
} }
...@@ -2575,17 +2546,13 @@ class QuantizeParam : public OpParam { ...@@ -2575,17 +2546,13 @@ class QuantizeParam : public OpParam {
// op output // op output
RType *output_; RType *output_;
RType *online_scale_; RType *online_scale_;
// if static scale or not // quantize offline scale
bool is_static_ = false; RType *offline_scale_;
// quantize scale // if offline scale or not
float static_scale_ = 1.0f; bool offline_ = false;
// round method type // round method type
// nearest_zero and nearest_even are valid currently
// RoundType round_type_ = ROUND_NEAREST_AWAY_ZERO; // RoundType round_type_ = ROUND_NEAREST_AWAY_ZERO;
RoundType round_type_ = ROUND_NEAREST_TOWARDS_ZERO; RoundType round_type_ = ROUND_NEAREST_TOWARDS_ZERO;
// optional paddings
std::vector<int> paddings_;
int8_t padding_val_;
}; };
#endif #endif
...@@ -2599,15 +2566,13 @@ class DequantizeParam : public OpParam { ...@@ -2599,15 +2566,13 @@ class DequantizeParam : public OpParam {
DequantizeParam(const VariableNameMap &inputs, const VariableNameMap &outputs, DequantizeParam(const VariableNameMap &inputs, const VariableNameMap &outputs,
const AttributeMap &attrs, const Scope &scope) { const AttributeMap &attrs, const Scope &scope) {
input_ = InputXFrom<GType>(inputs, scope); input_ = InputXFrom<GType>(inputs, scope);
if (outputs.count("Out")) {
output_ = OutFrom<GType>(outputs, scope); output_ = OutFrom<GType>(outputs, scope);
}
activation_scale_ = OpParam::GetVarValue<GType>("Scale", inputs, scope); activation_scale_ = OpParam::GetVarValue<GType>("Scale", inputs, scope);
// dequantization is performed as x = x / static_scale / online_scale // dequantization is performed as x = x / static_scale / online_scale
if (HasAttr("weight_scale", attrs)) { if (OpParam::HasAttr("weight_scale", attrs)) {
weight_scale_ = GetAttr<float>("weight_scale", attrs); weight_scale_ = OpParam::GetAttr<float>("weight_scale", attrs);
} else { } else {
weight_scale_ = GetAttr<float>("max_range", attrs); weight_scale_ = OpParam::GetAttr<float>("max_range", attrs);
} }
} }
...@@ -2621,9 +2586,11 @@ class DequantizeParam : public OpParam { ...@@ -2621,9 +2586,11 @@ class DequantizeParam : public OpParam {
}; };
#endif #endif
#if defined(FUSION_DEQUANT_ADD_BN_OP) || \ #if defined(FUSION_DEQUANT_BN_OP) || defined(FUSION_DEQUANT_ADD_BN_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_OP) || \ defined(FUSION_DEQUANT_ADD_BN_RELU_OP) || \
defined(FUSION_DEQUANT_BN_RELU_OP) || defined(FUSION_DEQUANT_BN_OP) defined(FUSION_DEQUANT_BN_RELU_OP) || \
defined(FUSION_DEQUANT_ADD_BN_QUANT_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP)
template <typename Dtype> template <typename Dtype>
class FusionDequantBNParam : public DequantizeParam<Dtype> { class FusionDequantBNParam : public DequantizeParam<Dtype> {
typedef typename DtypeTensorTrait<Dtype>::gtype GType; typedef typename DtypeTensorTrait<Dtype>::gtype GType;
...@@ -2640,10 +2607,6 @@ class FusionDequantBNParam : public DequantizeParam<Dtype> { ...@@ -2640,10 +2607,6 @@ class FusionDequantBNParam : public DequantizeParam<Dtype> {
bn_scale_ = OpParam::GetVarValue<GType>("BNScale", inputs, scope); bn_scale_ = OpParam::GetVarValue<GType>("BNScale", inputs, scope);
bn_bias_ = OpParam::GetVarValue<GType>("BNBias", inputs, scope); bn_bias_ = OpParam::GetVarValue<GType>("BNBias", inputs, scope);
epsilon_ = OpParam::GetAttr<float>("epsilon", attrs); epsilon_ = OpParam::GetAttr<float>("epsilon", attrs);
// output
if (outputs.count("Y")) {
this->output_ = OpParam::OutputYFrom<GType>(outputs, scope);
}
} }
public: public:
...@@ -2656,7 +2619,10 @@ class FusionDequantBNParam : public DequantizeParam<Dtype> { ...@@ -2656,7 +2619,10 @@ class FusionDequantBNParam : public DequantizeParam<Dtype> {
}; };
#endif #endif
#if defined(FUSION_DEQUANT_ADD_BN_RELU_OP) || defined(FUSION_DEQUANT_ADD_BN_OP) #if defined(FUSION_DEQUANT_ADD_BN_RELU_OP) || \
defined(FUSION_DEQUANT_ADD_BN_OP) || \
defined(FUSION_DEQUANT_ADD_BN_QUANT_OP) || \
defined(FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP)
template <typename Dtype> template <typename Dtype>
class FusionDequantAddBNParam : public FusionDequantBNParam<Dtype> { class FusionDequantAddBNParam : public FusionDequantBNParam<Dtype> {
typedef typename DtypeTensorTrait<Dtype>::gtype GType; typedef typename DtypeTensorTrait<Dtype>::gtype GType;
...@@ -2670,10 +2636,6 @@ class FusionDequantAddBNParam : public FusionDequantBNParam<Dtype> { ...@@ -2670,10 +2636,6 @@ class FusionDequantAddBNParam : public FusionDequantBNParam<Dtype> {
// element wise add params // element wise add params
axis_ = OpParam::GetAttr<int>("axis", attrs); axis_ = OpParam::GetAttr<int>("axis", attrs);
bias_ = OpParam::InputYFrom<GType>(inputs, scope); bias_ = OpParam::InputYFrom<GType>(inputs, scope);
// output
if (outputs.count("Y")) {
this->output_ = OpParam::OutputYFrom<GType>(outputs, scope);
}
} }
public: public:
...@@ -2683,41 +2645,39 @@ class FusionDequantAddBNParam : public FusionDequantBNParam<Dtype> { ...@@ -2683,41 +2645,39 @@ class FusionDequantAddBNParam : public FusionDequantBNParam<Dtype> {
}; };
#endif #endif
#ifdef FUSION_DEQUANT_BN_RELU_OP #ifdef FUSION_DEQUANT_ADD_BN_QUANT_OP
template <typename Dtype> template <typename Dtype>
class FusionDequantBNReluParam : public FusionDequantBNParam<Dtype> { class FusionDequantAddBNQuantParam : public FusionDequantAddBNParam<Dtype> {
typedef typename DtypeTensorTrait<Dtype>::gtype GType; typedef typename DtypeTensorTrait<Dtype>::gtype GType;
typedef typename DtypeTensorTrait<Dtype>::rtype RType; typedef typename DtypeTensorTrait<Dtype>::rtype RType;
public: public:
FusionDequantBNReluParam(const VariableNameMap &inputs, FusionDequantAddBNQuantParam(const VariableNameMap &inputs,
const VariableNameMap &outputs, const VariableNameMap &outputs,
const AttributeMap &attrs, const Scope &scope) const AttributeMap &attrs, const Scope &scope)
: FusionDequantBNParam<Dtype>(inputs, outputs, attrs, scope) { : FusionDequantAddBNParam<Dtype>(inputs, outputs, attrs, scope) {
// output // scale output
if (outputs.count("Out")) { online_scale_ = OpParam::GetVarValue<GType>("OutScale", outputs, scope);
this->output_ = OpParam::OutFrom<GType>(outputs, scope); // offline
if (inputs.count("InScale")) {
offline_ = true;
offline_scale_ = OpParam::GetVarValue<GType>("InScale", inputs, scope);
}
// x = round(scale * x)
if (OpParam::HasAttr("round_type", attrs)) {
round_type_ = OpParam::GetAttr<RoundType>("round_type", attrs);
} }
} }
};
#endif
#ifdef FUSION_DEQUANT_ADD_BN_RELU_OP
template <typename Dtype>
class FusionDequantAddBNReluParam : public FusionDequantAddBNParam<Dtype> {
typedef typename DtypeTensorTrait<Dtype>::gtype GType;
typedef typename DtypeTensorTrait<Dtype>::rtype RType;
public: public:
FusionDequantAddBNReluParam(const VariableNameMap &inputs, RType *online_scale_;
const VariableNameMap &outputs, // quantize offline scale
const AttributeMap &attrs, const Scope &scope) RType *offline_scale_;
: FusionDequantAddBNParam<Dtype>(inputs, outputs, attrs, scope) { // if offline scale or not
// output bool offline_ = false;
if (outputs.count("Out")) { // round method type
this->output_ = OpParam::OutFrom<GType>(outputs, scope); // RoundType round_type_ = ROUND_NEAREST_AWAY_ZERO;
} RoundType round_type_ = ROUND_NEAREST_TOWARDS_ZERO;
}
}; };
#endif #endif
......
...@@ -22,10 +22,7 @@ namespace operators { ...@@ -22,10 +22,7 @@ namespace operators {
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
void QuantizeOp<DeviceType, T>::InferShape() const { void QuantizeOp<DeviceType, T>::InferShape() const {
auto input_dims = this->param_.input_->dims(); const auto &input_dims = this->param_.input_->dims();
const std::vector<int> &paddings = this->param_.paddings_;
input_dims[2] += 2 * paddings[0];
input_dims[3] += 2 * paddings[1];
this->param_.output_->Resize(input_dims); this->param_.output_->Resize(input_dims);
auto scale_dims = framework::make_ddim(std::vector<int>{1}); auto scale_dims = framework::make_ddim(std::vector<int>{1});
this->param_.online_scale_->Resize(scale_dims); this->param_.online_scale_->Resize(scale_dims);
......
...@@ -24,17 +24,19 @@ void ReluOp<Dtype, T>::InferShape() const { ...@@ -24,17 +24,19 @@ void ReluOp<Dtype, T>::InferShape() const {
this->param_.Out()->Resize(input_dims); this->param_.Out()->Resize(input_dims);
} }
template <typename Dtype, typename T>
void Relu6Op<Dtype, T>::InferShape() const {
auto input_dims = this->param_.InputX()->dims();
this->param_.Out()->Resize(input_dims);
}
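// relu6 clamps activations to the range [0, 6]: f(x) = min(max(x, 0), 6).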
} // namespace operators } // namespace operators
} // namespace paddle_mobile } // namespace paddle_mobile
/*
 * @b Every op needs to be registered here;
 * the argument of USE_OP and the first argument of REGISTER_OPERATOR
 * must both match the op type stored in the model.
 * */
namespace ops = paddle_mobile::operators; namespace ops = paddle_mobile::operators;
#ifdef PADDLE_MOBILE_CPU #ifdef PADDLE_MOBILE_CPU
REGISTER_OPERATOR_CPU(relu, ops::ReluOp); REGISTER_OPERATOR_CPU(relu, ops::ReluOp);
REGISTER_OPERATOR_CPU(relu6, ops::Relu6Op);
#endif #endif
#ifdef PADDLE_MOBILE_MALI_GPU #ifdef PADDLE_MOBILE_MALI_GPU
REGISTER_OPERATOR_MALI_GPU(relu, ops::ReluOp); REGISTER_OPERATOR_MALI_GPU(relu, ops::ReluOp);
......
...@@ -25,25 +25,34 @@ limitations under the License. */ ...@@ -25,25 +25,34 @@ limitations under the License. */
namespace paddle_mobile { namespace paddle_mobile {
namespace operators { namespace operators {
using paddle_mobile::framework::Tensor;
template <typename DeviceType, typename T> template <typename DeviceType, typename T>
class ReluOp : public framework::OperatorWithKernel< class ReluOp : public framework::OperatorWithKernel<
DeviceType, ReluParam<DeviceType>, DeviceType, ReluParam<DeviceType>,
operators::ReluKernel<DeviceType, T>> { operators::ReluKernel<DeviceType, T>> {
public: public:
/*
* @b Constructor of the op: it must invoke the parent class constructor and construct its own parameter struct.
* */
ReluOp(const std::string &type, const VariableNameMap &inputs, ReluOp(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs, const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope) std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType, ReluParam<DeviceType>, : framework::OperatorWithKernel<DeviceType, ReluParam<DeviceType>,
operators::ReluKernel<DeviceType, T>>( operators::ReluKernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {} type, inputs, outputs, attrs, scope) {}
void InferShape() const override; void InferShape() const override;
};
template <typename DeviceType, typename T>
class Relu6Op : public framework::OperatorWithKernel<
DeviceType, ReluParam<DeviceType>,
operators::Relu6Kernel<DeviceType, T>> {
public:
Relu6Op(const std::string &type, const VariableNameMap &inputs,
const VariableNameMap &outputs, const framework::AttributeMap &attrs,
std::shared_ptr<framework::Scope> scope)
: framework::OperatorWithKernel<DeviceType, ReluParam<DeviceType>,
operators::Relu6Kernel<DeviceType, T>>(
type, inputs, outputs, attrs, scope) {}
protected: void InferShape() const override;
}; };
} // namespace operators } // namespace operators
......
...@@ -324,10 +324,6 @@ if (NOT FOUND_MATCH) ...@@ -324,10 +324,6 @@ if (NOT FOUND_MATCH)
ADD_EXECUTABLE(test-conv-add-relu-op operators/test_conv_add_relu_op.cpp test_helper.h test_include.h executor_for_test.h) ADD_EXECUTABLE(test-conv-add-relu-op operators/test_conv_add_relu_op.cpp test_helper.h test_include.h executor_for_test.h)
target_link_libraries(test-conv-add-relu-op paddle-mobile) target_link_libraries(test-conv-add-relu-op paddle-mobile)
# gen test
ADD_EXECUTABLE(test-conv-add-relu-int8-op operators/test_fusion_conv_add_relu_int8_op.cpp test_helper.h test_include.h)
target_link_libraries(test-conv-add-relu-int8-op paddle-mobile)
# gen test # gen test
ADD_EXECUTABLE(test-conv-add-bn-relu-op operators/test_fusion_conv_add_bn_relu_op.cpp test_helper.h test_include.h executor_for_test.h) ADD_EXECUTABLE(test-conv-add-bn-relu-op operators/test_fusion_conv_add_bn_relu_op.cpp test_helper.h test_include.h executor_for_test.h)
target_link_libraries(test-conv-add-bn-relu-op paddle-mobile) target_link_libraries(test-conv-add-bn-relu-op paddle-mobile)
......
...@@ -73,14 +73,14 @@ int main() { ...@@ -73,14 +73,14 @@ int main() {
// float // float
// warm-up 10 times // warm-up 10 times
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul<float>( paddle_mobile::operators::math::matmul<float, float>(
aa, false, bb, false, static_cast<float>(1), &cc, static_cast<float>(0), aa, false, bb, false, static_cast<float>(1), &cc, static_cast<float>(0),
false, nullptr); false, nullptr);
} }
auto time_start0 = time(); auto time_start0 = time();
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul<float>( paddle_mobile::operators::math::matmul<float, float>(
aa, false, bb, false, static_cast<float>(1), &cc, static_cast<float>(0), aa, false, bb, false, static_cast<float>(1), &cc, static_cast<float>(0),
false, nullptr); false, nullptr);
} }
...@@ -91,14 +91,14 @@ int main() { ...@@ -91,14 +91,14 @@ int main() {
// int8_t without bias // int8_t without bias
// warm-up 10 times // warm-up 10 times
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul<float, int32_t>( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(1), &cc_int32, aa_int8, false, bb_int8, false, static_cast<float>(1), &cc_int32,
static_cast<float>(0)); static_cast<float>(0));
} }
auto time_start1 = time(); auto time_start1 = time();
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul<float, int32_t>( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(1), &cc_int32, aa_int8, false, bb_int8, false, static_cast<float>(1), &cc_int32,
static_cast<float>(0)); static_cast<float>(0));
} }
...@@ -109,13 +109,13 @@ int main() { ...@@ -109,13 +109,13 @@ int main() {
// int8_t with bias, column element wise add // int8_t with bias, column element wise add
// warm-up 10 times // warm-up 10 times
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8, aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8,
static_cast<float>(0), false, bias_data_col, false); static_cast<float>(0), false, bias_data_col, false);
} }
auto time_start2 = time(); auto time_start2 = time();
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8, aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8,
static_cast<float>(0), false, bias_data_col, false); static_cast<float>(0), false, bias_data_col, false);
} }
...@@ -126,13 +126,13 @@ int main() { ...@@ -126,13 +126,13 @@ int main() {
// int8_t with bias, row element wise add // int8_t with bias, row element wise add
// warm-up 10 times // warm-up 10 times
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8, aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8,
static_cast<float>(0), false, bias_data_row, true); static_cast<float>(0), false, bias_data_row, true);
} }
auto time_start3 = time(); auto time_start3 = time();
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8, aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8,
static_cast<float>(0), false, bias_data_row, true); static_cast<float>(0), false, bias_data_row, true);
} }
...@@ -143,13 +143,13 @@ int main() { ...@@ -143,13 +143,13 @@ int main() {
// int8_t with bias&relu // int8_t with bias&relu
// warm-up 10 times // warm-up 10 times
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8, aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8,
static_cast<float>(0), true, bias_data_col, false); static_cast<float>(0), true, bias_data_col, false);
} }
auto time_start4 = time(); auto time_start4 = time();
for (int j = 0; j < 10; ++j) { for (int j = 0; j < 10; ++j) {
paddle_mobile::operators::math::matmul( paddle_mobile::operators::math::matmul<int8_t, int32_t>(
aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8, aa_int8, false, bb_int8, false, static_cast<float>(0.618), &cc_int8,
static_cast<float>(0), true, bias_data_col, false); static_cast<float>(0), true, bias_data_col, false);
} }
......
...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and ...@@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */ limitations under the License. */
#include <iostream> #include <iostream>
#include <sstream>
#include "../test_helper.h" #include "../test_helper.h"
#include "../test_include.h" #include "../test_include.h"
...@@ -59,6 +60,13 @@ int main(int argc, char* argv[]) { ...@@ -59,6 +60,13 @@ int main(int argc, char* argv[]) {
} }
auto time4 = time(); auto time4 = time();
std::cout << "predict cost :" << time_diff(time3, time4) / 10 << "ms\n"; std::cout << "predict cost :" << time_diff(time3, time4) / 10 << "ms\n";
    std::ostringstream os;
    os << "output tensor size: " << output->numel() << "\n" << output->data<float>()[0];
for (int i = 1; i < output->numel(); ++i) {
os << ", " << output->data<float>()[i];
}
std::string output_str = os.str();
std::cout << output_str << std::endl;
} }
return 0; return 0;
} }
...@@ -16,16 +16,30 @@ limitations under the License. */ ...@@ -16,16 +16,30 @@ limitations under the License. */
#include "../test_helper.h" #include "../test_helper.h"
#include "../test_include.h" #include "../test_include.h"
int main() { int main(int argc, char* argv[]) {
if (argc < 2) {
std::cout << "Usage: ./test_benchmark feed_shape [thread_num] [use_fuse]\n"
<< "feed_shape: input tensor shape, such as 1,3,224,224.\n"
<< "thread_num: optional int, threads count, default is 1.\n"
<< "use_fuse: optional bool, default is 0.\n";
return 1;
}
int thread_num = 1;
bool optimize = false;
char* feed_shape = argv[1];
if (argc >= 3) {
thread_num = atoi(argv[2]);
}
if (argc >= 4) {
optimize = atoi(argv[3]);
}
#ifdef PADDLE_MOBILE_FPGA #ifdef PADDLE_MOBILE_FPGA
paddle_mobile::PaddleMobile<paddle_mobile::FPGA> paddle_mobile; paddle_mobile::PaddleMobile<paddle_mobile::FPGA> paddle_mobile;
#endif #endif
#ifdef PADDLE_MOBILE_CPU #ifdef PADDLE_MOBILE_CPU
paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile; paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
#endif #endif
paddle_mobile.SetThreadNum(thread_num);
paddle_mobile.SetThreadNum(1);
bool optimize = true;
auto time1 = time(); auto time1 = time();
if (paddle_mobile.Load(g_googlenet, optimize)) { if (paddle_mobile.Load(g_googlenet, optimize)) {
auto time2 = paddle_mobile::time(); auto time2 = paddle_mobile::time();
...@@ -34,6 +48,11 @@ int main() { ...@@ -34,6 +48,11 @@ int main() {
std::vector<float> input; std::vector<float> input;
std::vector<float> output; std::vector<float> output;
std::vector<int64_t> dims{1, 3, 224, 224}; std::vector<int64_t> dims{1, 3, 224, 224};
    if (feed_shape) {
      // parse "N,C,H,W" (e.g. 1,3,224,224) into plain ints; reading directly
      // into the int64_t dims with "%d" would be undefined behavior
      int batch = 1, channel = 3, height = 224, width = 224;
      sscanf(feed_shape, "%d,%d,%d,%d", &batch, &channel, &height, &width);
      dims = {batch, channel, height, width};
    }
std::cout << "feed shape: [" << dims[0] << ", " << dims[1] << ", "
<< dims[2] << ", " << dims[3] << "]\n";
GetInput<float>(g_test_image_1x3x224x224, &input, dims); GetInput<float>(g_test_image_1x3x224x224, &input, dims);
// warmup // warmup
for (int i = 0; i < 10; ++i) { for (int i = 0; i < 10; ++i) {
...@@ -44,7 +63,6 @@ int main() { ...@@ -44,7 +63,6 @@ int main() {
output = paddle_mobile.Predict(input, dims); output = paddle_mobile.Predict(input, dims);
} }
auto time4 = time(); auto time4 = time();
std::cout << "predict cost: " << time_diff(time3, time4) / 10 << "ms\n"; std::cout << "predict cost: " << time_diff(time3, time4) / 10 << "ms\n";
} }
return 0; return 0;
......
...@@ -129,7 +129,8 @@ void conv2d(const framework::Tensor *input, const framework::Tensor *filter, ...@@ -129,7 +129,8 @@ void conv2d(const framework::Tensor *input, const framework::Tensor *filter,
} }
template <typename Itype, typename Otype, int Kernel, int Pad, int Stride> template <typename Itype, typename Otype, int Kernel, int Pad, int Stride>
int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) { int TestConvOp(int in_channels, int in_height, int in_width, int out_channels,
int groups) {
int kernel_h = Kernel; int kernel_h = Kernel;
int kernel_w = Kernel; int kernel_w = Kernel;
int pad_h = Pad; int pad_h = Pad;
...@@ -147,7 +148,7 @@ int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) { ...@@ -147,7 +148,7 @@ int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) {
framework::DDim input_shape = framework::DDim input_shape =
framework::make_ddim({batch_size, input_c, input_h, input_w}); framework::make_ddim({batch_size, input_c, input_h, input_w});
framework::DDim filter_shape = framework::DDim filter_shape =
framework::make_ddim({output_c, input_c, kernel_h, kernel_w}); framework::make_ddim({output_c, input_c / groups, kernel_h, kernel_w});
VariableNameMap inputs; VariableNameMap inputs;
VariableNameMap outputs; VariableNameMap outputs;
...@@ -164,13 +165,22 @@ int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) { ...@@ -164,13 +165,22 @@ int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) {
auto filter = filter_var->template GetMutable<framework::LoDTensor>(); auto filter = filter_var->template GetMutable<framework::LoDTensor>();
SetupTensor<Itype>(filter, filter_shape, -20, 20); SetupTensor<Itype>(filter, filter_shape, -20, 20);
for (int i = 0; i < input->numel(); ++i) {
DLOG << "input[" << i
<< "] = " << static_cast<int>(input->data<int8_t>()[i]);
}
for (int i = 0; i < filter->numel(); ++i) {
DLOG << "filter[" << i
<< "] = " << static_cast<int>(filter->data<int8_t>()[i]);
}
auto output_var = scope.get()->Var("output"); auto output_var = scope.get()->Var("output");
framework::AttributeMap attrs; framework::AttributeMap attrs;
attrs["strides"].Set<vector<int>>(std::vector<int>({stride_h, stride_w})); attrs["strides"].Set<vector<int>>(std::vector<int>({stride_h, stride_w}));
attrs["paddings"].Set<vector<int>>(std::vector<int>({pad_h, pad_w})); attrs["paddings"].Set<vector<int>>(std::vector<int>({pad_h, pad_w}));
attrs["dilations"].Set<vector<int>>( attrs["dilations"].Set<vector<int>>(
std::vector<int>({dilation_h, dilation_w})); std::vector<int>({dilation_h, dilation_w}));
attrs["groups"].Set<int>(1); attrs["groups"].Set<int>(groups);
auto *op = new operators::ConvOp<CPU, float>("conv2d", inputs, outputs, attrs, auto *op = new operators::ConvOp<CPU, float>("conv2d", inputs, outputs, attrs,
scope); scope);
...@@ -204,15 +214,15 @@ int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) { ...@@ -204,15 +214,15 @@ int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) {
Otype *output_cmp_data = output_cmp.data<Otype>(); Otype *output_cmp_data = output_cmp.data<Otype>();
for (int i = 0; i < output->numel(); ++i) { for (int i = 0; i < output->numel(); ++i) {
float gap = output_data[i] - output_cmp_data[i]; float gap = output_data[i] - output_cmp_data[i];
PADDLE_MOBILE_ENFORCE(std::abs(gap / (output_data[i] + 1e-5)) < 1e-3, // PADDLE_MOBILE_ENFORCE(std::abs(gap / (output_data[i] + 1e-5)) < 1e-3,
"output[%d] = %d, output_cmp[%d] = %d", i, // "output[%d] = %d, output_cmp[%d] = %d", i,
output_data[i], i, output_cmp_data[i]); // output_data[i], i, output_cmp_data[i]);
// if (std::abs(gap / (output_data[i] + 1e-5)) > 1e-3) { if (std::abs(gap / (output_data[i] + 1e-5)) > 1e-3) {
// LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i] LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i]
// << ", output_cmp_data[" << i << "] = " << << ", output_cmp_data[" << i
// output_cmp_data[i]; << "] = " << output_cmp_data[i];
// return 1; exit(1);
// } }
} }
delete op; delete op;
return 0; return 0;
...@@ -224,7 +234,8 @@ int main(int argc, char *argv[]) { ...@@ -224,7 +234,8 @@ int main(int argc, char *argv[]) {
if (argc < 5) { if (argc < 5) {
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "Usage:\n" << "Usage:\n"
<< " ./test-int8-conv-op in_channels in_height in_width out_channels\n" << " ./test-int8-conv-op in_channels in_height in_width out_channels "
"[groups]\n"
<< " params:\n" << " params:\n"
<< " -in_channels: int, input image's channels\n" << " -in_channels: int, input image's channels\n"
<< " -in_height: int, input image's height\n" << " -in_height: int, input image's height\n"
...@@ -236,72 +247,134 @@ int main(int argc, char *argv[]) { ...@@ -236,72 +247,134 @@ int main(int argc, char *argv[]) {
int in_height = atoi(argv[2]); int in_height = atoi(argv[2]);
int in_width = atoi(argv[3]); int in_width = atoi(argv[3]);
int out_channels = atoi(argv[4]); int out_channels = atoi(argv[4]);
// kernel = 3, pad = 1, stride = 1 int groups = 1;
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=1, stride=1"; if (argc == 6) {
paddle_mobile::TestConvOp<float, float, 3, 1, 1>(in_channels, in_height, groups = atoi(argv[5]);
in_width, out_channels); }
// kernel = 7, pad = 0, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=0, stride=2";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 0, 2>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 1, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=1, stride=2";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 1, 2>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 3, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=2";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 3, 2>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=0, stride=1";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 0, 1>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=1, stride=1";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 1, 1>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 3, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=1";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 3, 1>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 5, stride = 3
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=5, stride=3";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 5, 3>(in_channels, in_height,
in_width, out_channels);
// kernel = 7, pad = 3, stride = 4
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=4";
paddle_mobile::TestConvOp<int8_t, int32_t, 7, 3, 4>(in_channels, in_height,
in_width, out_channels);
// kernel = 3, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=0, stride=1";
paddle_mobile::TestConvOp<int8_t, int32_t, 3, 0, 1>(in_channels, in_height,
in_width, out_channels);
// kernel = 3, pad = 0, stride = 1 // kernel = 3, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=0, stride=1"; LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=0, stride=1";
paddle_mobile::TestConvOp<float, float, 3, 0, 1>(in_channels, in_height, paddle_mobile::TestConvOp<int8_t, int32_t, 3, 0, 1>(
in_width, out_channels); in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=1, stride=1";
paddle_mobile::TestConvOp<int8_t, int32_t, 3, 1, 1>(in_channels, in_height,
in_width, out_channels);
// kernel = 3, pad = 1, stride = 1 // kernel = 3, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=1, stride=1"; LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=1, stride=1";
paddle_mobile::TestConvOp<float, float, 3, 1, 1>(in_channels, in_height, paddle_mobile::TestConvOp<int8_t, int32_t, 3, 1, 1>(
in_width, out_channels); in_channels, in_height, in_width, out_channels, groups);
// kernel = 5, pad = 0, stride = 1 // kernel = 3, pad = 2, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=5, pad=0, stride=1"; LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=2, stride=1";
paddle_mobile::TestConvOp<int8_t, int32_t, 5, 0, 1>(in_channels, in_height, paddle_mobile::TestConvOp<int8_t, int32_t, 3, 2, 1>(
in_width, out_channels); in_channels, in_height, in_width, out_channels, groups);
// kernel = 5, pad = 0, stride = 1 // kernel = 3, pad = 5, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=5, pad=0, stride=1"; LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=5, stride=1";
paddle_mobile::TestConvOp<float, float, 5, 0, 1>(in_channels, in_height, paddle_mobile::TestConvOp<int8_t, int32_t, 3, 5, 1>(
in_width, out_channels); in_channels, in_height, in_width, out_channels, groups);
// kernel = 5, pad = 2, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=5, pad=2, stride=1"; // kernel = 3, pad = 0, stride = 2
paddle_mobile::TestConvOp<int8_t, int32_t, 5, 2, 1>(in_channels, in_height, LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=0, stride=2";
in_width, out_channels); paddle_mobile::TestConvOp<int8_t, int32_t, 3, 0, 2>(
// kernel = 5, pad = 2, stride = 1 in_channels, in_height, in_width, out_channels, groups);
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=5, pad=2, stride=1"; // kernel = 3, pad = 1, stride = 2
paddle_mobile::TestConvOp<float, float, 5, 2, 1>(in_channels, in_height, LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=1, stride=2";
in_width, out_channels); paddle_mobile::TestConvOp<int8_t, int32_t, 3, 1, 2>(
in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 2, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=2, stride=2";
paddle_mobile::TestConvOp<int8_t, int32_t, 3, 2, 2>(
in_channels, in_height, in_width, out_channels, groups);
// kernel = 3, pad = 5, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=5, stride=2";
paddle_mobile::TestConvOp<int8_t, int32_t, 3, 5, 2>(
in_channels, in_height, in_width, out_channels, groups);
// // kernel = 7, pad = 0, stride = 2
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=0, stride=2";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 0, 2>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 1, stride = 2
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=1, stride=2";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 1, 2>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 3, stride = 2
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=2";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 3, 2>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=0, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 0, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 1, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=1, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 1, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 3, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 3, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 5, stride = 3
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=5, stride=3";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 5, 3>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 7, pad = 3, stride = 4
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=4";
// paddle_mobile::TestConvOp<int8_t, int32_t, 7, 3, 4>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 3, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=0, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 3, 0, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 3, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=0, stride=1";
// paddle_mobile::TestConvOp<float, float, 3, 0, 1>(in_channels, in_height,
// in_width, out_channels,
// groups);
// // kernel = 3, pad = 1, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=1, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 3, 1, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 3, pad = 1, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "float, kernel=3, pad=1, stride=1";
// paddle_mobile::TestConvOp<float, float, 3, 1, 1>(in_channels, in_height,
// in_width, out_channels,
// groups);
// // kernel = 5, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=5, pad=0, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 5, 0, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 5, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "float, kernel=5, pad=0, stride=1";
// paddle_mobile::TestConvOp<float, float, 5, 0, 1>(in_channels, in_height,
// in_width, out_channels,
// groups);
// // kernel = 5, pad = 2, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=5, pad=2, stride=1";
// paddle_mobile::TestConvOp<int8_t, int32_t, 5, 2, 1>(in_channels,
// in_height,
// in_width,
// out_channels, groups);
// // kernel = 5, pad = 2, stride = 1
// LOG(paddle_mobile::kLOG_INFO) << "float, kernel=5, pad=2, stride=1";
// paddle_mobile::TestConvOp<float, float, 5, 2, 1>(in_channels, in_height,
// in_width, out_channels,
// groups);
} }
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <iostream>
#ifdef FUSION_CONVADDRELU_INT8_OP
#include <limits>
#include "../test_helper.h"
#include "../test_include.h"
#include "operators/fusion_conv_add_relu_int8_op.h"
namespace paddle_mobile {
int32_t qadd_int32(int32_t l, int32_t r) {
int64_t res = static_cast<int64_t>(l) + static_cast<int64_t>(r);
if (res > std::numeric_limits<int32_t>::max())
return std::numeric_limits<int32_t>::max();
else if (res < std::numeric_limits<int32_t>::min())
return std::numeric_limits<int32_t>::min();
else
return static_cast<int32_t>(res);
}
// round to zero
float round2zero(float v) {
  float res = v;  // v == 0 falls through unchanged
if (v > 0)
res = std::floor(v);
else if (v < 0)
res = std::ceil(v);
return res;
}
int8_t qscale_int32(int32_t v, float scale) {
float res = static_cast<float>(v) * scale;
res = round2zero(res);
if (res > 127)
return static_cast<int8_t>(127);
else if (res < -127)
return static_cast<int8_t>(-127);
else
return static_cast<int8_t>(res);
}
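// Worked example of the bias/activation/rescale chain used below, with
// illustrative numbers: for scale = 0.05f,
//   qadd_int32(1000, 234)        -> 1234   (no saturation)
//   std::max(0, 1234)            -> 1234   (ReLU)
//   qscale_int32(1234, 0.05f)    -> 61     (61.7 rounded toward zero)
//   qscale_int32(100000, 0.05f)  -> 127    (clamped to the int8 range)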
// Reference convolution from Caffe for checking results.
// accumulate through explicit loops over input, output, and filters.
template <typename T>
void conv2d(const framework::Tensor *input, const framework::Tensor *filter,
const framework::Tensor *bias, const framework::AttributeMap &attrs,
framework::Tensor *output, float scale) {
framework::AttrReader attr_reader(attrs);
std::vector<int> paddings = attr_reader.Get<std::vector<int>>("paddings");
std::vector<int> strides = attr_reader.Get<std::vector<int>>("strides");
std::vector<int> dilations = attr_reader.Get<std::vector<int>>("dilations");
int groups = attr_reader.Get<int>("groups");
int kernel_h = filter->dims()[2];
int kernel_w = filter->dims()[3];
int pad_h = paddings[0];
int pad_w = paddings[1];
int stride_h = strides[0];
int stride_w = strides[1];
int dilation_h = dilations[0];
int dilation_w = dilations[1];
auto in_shape = input->dims();
auto out_shape = output->dims();
const bool has_depth = 0;
int kernel_d, pad_d, stride_d, dilation_d;
if (has_depth) {
kernel_d = kernel_h;
stride_d = stride_h;
pad_d = pad_h;
dilation_d = dilation_h;
} else {
kernel_d = stride_d = dilation_d = 1;
pad_d = 0;
}
// Groups
int o_g = out_shape[1] / groups;
int k_g = in_shape[1] / groups;
int o_head, k_head;
// Convolution
vector<int> weight_offset(4 + has_depth);
vector<int> in_offset(4 + has_depth);
vector<int> out_offset(4 + has_depth);
auto offset = [](const framework::Tensor *input, const vector<int> &indics) {
framework::DDim shape = input->dims();
size_t count = 0;
for (int i = 0; i < indics.size(); ++i) {
count *= shape[i];
count += indics[i];
}
return count;
};
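  // offset() flattens an n-dimensional index into a row-major linear offset
  // over the tensor's dims(), so in_data/w_data/out_data_32 can be indexed
  // directly below.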
const T *in_data = input->data<T>();
const T *w_data = filter->data<T>();
framework::Tensor output_32;
int32_t *out_data_32 = output_32.mutable_data<int32_t>(out_shape);
memset(out_data_32, 0, output_32.numel() * sizeof(int32_t));
for (int n = 0; n < out_shape[0]; n++) {
for (int g = 0; g < groups; g++) {
o_head = o_g * g;
k_head = k_g * g;
for (int o = 0; o < o_g; o++) {
for (int k = 0; k < k_g; k++) {
for (int z = 0; z < (has_depth ? out_shape[2] : 1); z++) {
for (int y = 0; y < out_shape[2 + has_depth]; y++) {
for (int x = 0; x < out_shape[3 + has_depth]; x++) {
for (int r = 0; r < kernel_d; r++) {
for (int p = 0; p < kernel_h; p++) {
for (int q = 0; q < kernel_w; q++) {
int in_z = z * stride_d - pad_d + r * dilation_d;
int in_y = y * stride_h - pad_h + p * dilation_h;
int in_x = x * stride_w - pad_w + q * dilation_w;
if (in_z >= 0 && in_z < (has_depth ? in_shape[2] : 1) &&
in_y >= 0 && in_y < in_shape[2 + has_depth] &&
in_x >= 0 && in_x < in_shape[3 + has_depth]) {
weight_offset[0] = o + o_head;
weight_offset[1] = k;
if (has_depth) {
weight_offset[2] = r;
}
weight_offset[2 + has_depth] = p;
weight_offset[3 + has_depth] = q;
in_offset[0] = n;
in_offset[1] = k + k_head;
if (has_depth) {
in_offset[2] = in_z;
}
in_offset[2 + has_depth] = in_y;
in_offset[3 + has_depth] = in_x;
out_offset[0] = n;
out_offset[1] = o + o_head;
if (has_depth) {
out_offset[2] = z;
}
out_offset[2 + has_depth] = y;
out_offset[3 + has_depth] = x;
out_data_32[offset(output, out_offset)] +=
in_data[offset(input, in_offset)] *
w_data[offset(filter, weight_offset)];
}
}
}
}
}
}
}
}
}
}
}
T *out_data = output->mutable_data<T>();
int32_t n = out_shape[0];
int32_t c = out_shape[1];
int32_t h = out_shape[2];
int32_t w = out_shape[3];
const int32_t *bias_data = bias->data<int32_t>();
for (int i = 0; i < n; ++i) {
for (int j = 0; j < c; ++j) {
int32_t bias_v = bias_data[j];
for (int k = 0; k < h; ++k) {
for (int l = 0; l < w; ++l) {
int32_t tmp = out_data_32[i * c * h * w + j * h * w + k * w + l];
tmp = qadd_int32(tmp, bias_v);
tmp = std::max(0, tmp);
out_data[i * c * h * w + j * h * w + k * w + l] =
qscale_int32(tmp, scale);
}
}
}
}
}
template <typename T, int Kernel, int Pad, int Stride>
int TestConvOp(int in_channels, int in_height, int in_width, int out_channels) {
int kernel_h = Kernel;
int kernel_w = Kernel;
int pad_h = Pad;
int pad_w = Pad;
int stride_h = Stride;
int stride_w = Stride;
int dilation_h = 1;
int dilation_w = 1;
int batch_size = 1;
int input_c = in_channels;
int input_h = in_height;
int input_w = in_width;
int output_c = out_channels;
framework::DDim input_shape =
framework::make_ddim({batch_size, input_c, input_h, input_w});
framework::DDim filter_shape =
framework::make_ddim({output_c, input_c, kernel_h, kernel_w});
int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;
int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
int output_h = (input_h + 2 * pad_h - kernel_extent_h) / stride_h + 1;
int output_w = (input_w + 2 * pad_w - kernel_extent_w) / stride_w + 1;
framework::DDim output_shape = framework::make_ddim(
std::vector<int>({batch_size, output_c, output_h, output_w}));
framework::DDim bias_shape = framework::make_ddim({output_c});
VariableNameMap inputs;
VariableNameMap outputs;
auto scope = std::make_shared<framework::Scope>();
inputs["Input"] = std::vector<std::string>({"input"});
inputs["Filter"] = std::vector<std::string>({"filter"});
inputs["Scale"] = std::vector<std::string>({"scale"});
inputs["Y"] = std::vector<std::string>({"bias"});
outputs["Out"] = std::vector<std::string>({"output"});
auto input_var = scope.get()->Var("input");
auto input = input_var->template GetMutable<framework::LoDTensor>();
SetupTensor<T>(input, input_shape, -127, 127);
auto filter_var = scope.get()->Var("filter");
auto filter = filter_var->template GetMutable<framework::LoDTensor>();
SetupTensor<T>(filter, filter_shape, -127, 127);
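// Requantization scale fed to the op; the reference conv2d below applies the same factor.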
auto scale_var = scope.get()->Var("scale");
auto scale = scale_var->template GetMutable<framework::LoDTensor>();
scale->Resize(framework::make_ddim({1}));
float scale_v = 0.000828f;
scale->mutable_data<float>()[0] = scale_v;
auto bias_var = scope.get()->Var("bias");
auto bias = bias_var->template GetMutable<framework::LoDTensor>();
SetupTensor<int32_t>(bias, bias_shape, -127, 127);
auto output_var = scope.get()->Var("output");
framework::AttributeMap attrs;
attrs["strides"].Set<vector<int>>(std::vector<int>({stride_h, stride_w}));
attrs["paddings"].Set<vector<int>>(std::vector<int>({pad_h, pad_w}));
attrs["dilations"].Set<vector<int>>(
std::vector<int>({dilation_h, dilation_w}));
attrs["groups"].Set<int>(1);
attrs["axis"].Set<int>(0);
auto *op = new operators::FusionConvAddReluInt8Op<CPU, T>(
"fusion_conv_add_relu_int8", inputs, outputs, attrs, scope);
op->InferShape();
op->Init();
op->Run();
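// Compute the reference result with the naive conv2d above and require the
// fused int8 kernel to match it exactly.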
framework::Tensor output_cmp;
output_cmp.mutable_data<T>(output_shape);
conv2d<T>(input, filter, bias, attrs, &output_cmp, scale_v);
// compare results
int eq = 0;
int neq = 0;
auto output = output_var->template Get<framework::LoDTensor>();
const T *output_data = output->data<T>();
T *output_cmp_data = output_cmp.data<T>();
for (int i = 0; i < output->numel(); ++i) {
PADDLE_MOBILE_ENFORCE(
output_data[i] == output_cmp_data[i],
"The execution of test_fusion_conv_add_relu_int8_op failed!");
if (output_data[i] == output_cmp_data[i]) {
++eq;
} else {
++neq;
}
}
std::cout << "eq = " << eq << ", neq = " << neq << std::endl;
delete op;
return 0;
}
} // namespace paddle_mobile
int main(int argc, char *argv[]) {
if (argc < 5) {
LOG(paddle_mobile::kLOG_INFO)
<< "Usage:\n"
<< " ./test-conv-add-relu-int8-op in_channels in_height in_width "
"out_channels\n"
<< " params:\n"
<< " -in_channels: int, input image's channels\n"
<< " -in_height: int, input image's height\n"
<< " -in_width: int, input image's width\n"
<< " -out_channels: int, conv output channels\n";
return 1;
}
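// Illustrative invocation (argument values are arbitrary examples):
//   ./test-conv-add-relu-int8-op 16 32 32 16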
int in_channels = atoi(argv[1]);
int in_height = atoi(argv[2]);
int in_width = atoi(argv[3]);
int out_channels = atoi(argv[4]);
// kernel = 3, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8_t, kernel=3, pad=1, stride=1";
paddle_mobile::TestConvOp<int8_t, 3, 1, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 0, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=0, stride=2";
paddle_mobile::TestConvOp<int8_t, 7, 0, 2>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 1, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=1, stride=2";
paddle_mobile::TestConvOp<int8_t, 7, 1, 2>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 3, stride = 2
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=2";
paddle_mobile::TestConvOp<int8_t, 7, 3, 2>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=0, stride=1";
paddle_mobile::TestConvOp<int8_t, 7, 0, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=1, stride=1";
paddle_mobile::TestConvOp<int8_t, 7, 1, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 3, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=1";
paddle_mobile::TestConvOp<int8_t, 7, 3, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 5, stride = 3
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=5, stride=3";
paddle_mobile::TestConvOp<int8_t, 7, 5, 3>(in_channels, in_height, in_width,
out_channels);
// kernel = 7, pad = 3, stride = 4
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=7, pad=3, stride=4";
paddle_mobile::TestConvOp<int8_t, 7, 3, 4>(in_channels, in_height, in_width,
out_channels);
// kernel = 3, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=0, stride=1";
paddle_mobile::TestConvOp<int8_t, 3, 0, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 3, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=3, pad=1, stride=1";
paddle_mobile::TestConvOp<int8_t, 3, 1, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 5, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=5, pad=0, stride=1";
paddle_mobile::TestConvOp<int8_t, 5, 0, 1>(in_channels, in_height, in_width,
out_channels);
// kernel = 5, pad = 2, stride = 1
LOG(paddle_mobile::kLOG_INFO) << "int8, kernel=5, pad=2, stride=1";
paddle_mobile::TestConvOp<int8_t, 5, 2, 1>(in_channels, in_height, in_width,
out_channels);
}
#else
int main() {
std::cout << "FUSION_CONVADDRELU_INT8_OP is not defined!" << std::endl;
return 0;
}
#endif
...@@ -18,9 +18,6 @@ limitations under the License. */ ...@@ -18,9 +18,6 @@ limitations under the License. */
#include "../test_include.h" #include "../test_include.h"
#include "framework/operator.h" #include "framework/operator.h"
#include "operators/fusion_fc_op.h" #include "operators/fusion_fc_op.h"
#ifdef FUSION_FC_INT8_OP
#include "operators/fusion_fc_int8_op.h"
#endif
#define a(i, j) a[(i)*lda + (j)] #define a(i, j) a[(i)*lda + (j)]
#define b(i, j) b[(i)*ldb + (j)] #define b(i, j) b[(i)*ldb + (j)]
...@@ -105,18 +102,8 @@ int TestFcOP() { ...@@ -105,18 +102,8 @@ int TestFcOP() {
attrs["y_num_col_dims"].Set<int>(1); attrs["y_num_col_dims"].Set<int>(1);
attrs["axis"].Set<int>(1); attrs["axis"].Set<int>(1);
operators::OperatorBase<CPU> *op = nullptr; operators::OperatorBase<CPU> *op = nullptr;
#ifdef FUSION_FC_INT8_OP
if (std::is_same<T, int8_t>::value) {
op = new operators::FusionFcInt8Op<CPU, T>("fusion_fc_int8", inputs,
outputs, attrs, scope);
} else {
op = new operators::FusionFcOp<CPU, T>("fusion_fc", inputs, outputs, attrs,
scope);
}
#else
op = new operators::FusionFcOp<CPU, T>("fusion_fc", inputs, outputs, attrs, op = new operators::FusionFcOp<CPU, T>("fusion_fc", inputs, outputs, attrs,
scope); scope);
#endif
op->InferShape(); op->InferShape();
op->Run(); op->Run();
auto output = output_var->template Get<framework::LoDTensor>(); auto output = output_var->template Get<framework::LoDTensor>();
...@@ -168,9 +155,6 @@ int TestFcOP() { ...@@ -168,9 +155,6 @@ int TestFcOP() {
int main() { int main() {
paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile; paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
paddle_mobile.SetThreadNum(4); paddle_mobile.SetThreadNum(4);
#ifdef FUSION_FC_INT8_OP
paddle_mobile::TestFcOP<int8_t, int32_t>();
#endif
paddle_mobile::TestFcOP<float, float>(); paddle_mobile::TestFcOP<float, float>();
return 0; return 0;
} }
...@@ -14,88 +14,14 @@ limitations under the License. */ ...@@ -14,88 +14,14 @@ limitations under the License. */
#include <iostream> #include <iostream>
#include "../test_include.h" #include "../test_include.h"
#include "operators/kernel/central-arm-func/pool_arm_func.h" #include "operators/math/pooling.h"
#include "operators/pool_op.h" #include "operators/pool_op.h"
namespace paddle_mobile { namespace paddle_mobile {
static int PoolOutputSize(int input_size, int filter_size, int padding,
int stride, bool ceil_mode) {
int output_size;
if (!ceil_mode) {
output_size = (input_size - filter_size + 2 * padding) / stride + 1;
} else {
output_size =
(input_size - filter_size + 2 * padding + stride - 1) / stride + 1;
}
return output_size;
}
template <typename T> namespace math = operators::math;
static void PoolAvgPad0(std::vector<int> ksize, std::vector<int> strides,
const framework::Tensor *input,
framework::Tensor *out) {
const int32_t batch_size = input->dims()[0];
const int32_t input_c = input->dims()[1];
const int32_t input_h = input->dims()[2];
const int32_t input_w = input->dims()[3];
const int32_t out_c = out->dims()[1];
const int32_t out_h = out->dims()[2];
const int32_t out_w = out->dims()[3];
const int32_t kernel_h = ksize[0];
const int32_t kernel_w = ksize[1];
const int32_t stride_h = strides[0];
const int32_t stride_w = strides[1];
const int32_t inputdata_channel_stride = input_h * input_w;
const int32_t input_batch_stride = input_c * inputdata_channel_stride;
const int32_t outputdata_channel_stride = out_h * out_w;
const int32_t output_batch_stride = out_c * outputdata_channel_stride;
T *out_data = out->mutable_data<T>();
const T *input_data = input->data<T>();
const T **rows = new const T *[kernel_h];
for (int i = 0; i < batch_size; ++i) {
for (int j = 0; j < out_c; ++j) {
const T *img_in = input_data + j * inputdata_channel_stride;
T *img_out = out_data + j * outputdata_channel_stride;
for (int k = 0; k < out_h; ++k) {
for (int m = 0; m < kernel_h; ++m) {
rows[m] = img_in + (stride_h * k + m) * input_w;
}
int32_t left = out_w;
while (left > 0) {
float tmp = 0;
for (int m = 0; m < kernel_h; ++m) {
for (int l = 0; l < kernel_w; ++l) {
tmp += rows[m][l];
}
}
if (typeid(T) == typeid(int8_t)) {
tmp = tmp / (kernel_h * kernel_w);
if (tmp < -127) {
*img_out = -127;
} else if (tmp > 127) {
*img_out = 127;
} else {
*img_out = static_cast<T>(std::round(tmp));
}
} else {
*img_out = static_cast<T>(tmp / (kernel_h * kernel_w));
}
for (int m = 0; m < kernel_h; ++m) {
rows[m] += stride_w;
}
img_out++;
left--;
}
}
}
input_data += input_batch_stride;
out_data += output_batch_stride;
}
delete[] rows;
}
template <typename T, int CeilMode, int PoolType, int Kernel, int Pad, template <int PoolType, int Kernel, int Pad, int Stride>
int Stride>
int TestPoolOp(int in_channels, int in_height, int in_width) { int TestPoolOp(int in_channels, int in_height, int in_width) {
int kernel_h = Kernel; int kernel_h = Kernel;
int kernel_w = Kernel; int kernel_w = Kernel;
...@@ -103,7 +29,6 @@ int TestPoolOp(int in_channels, int in_height, int in_width) { ...@@ -103,7 +29,6 @@ int TestPoolOp(int in_channels, int in_height, int in_width) {
int pad_w = Pad; int pad_w = Pad;
int stride_h = Stride; int stride_h = Stride;
int stride_w = Stride; int stride_w = Stride;
bool ceil_mode = CeilMode != 0;
std::string pooling_type = (PoolType == 0 ? "max" : "avg"); std::string pooling_type = (PoolType == 0 ? "max" : "avg");
int batch_size = 1; int batch_size = 1;
...@@ -114,14 +39,6 @@ int TestPoolOp(int in_channels, int in_height, int in_width) { ...@@ -114,14 +39,6 @@ int TestPoolOp(int in_channels, int in_height, int in_width) {
framework::DDim input_shape = framework::DDim input_shape =
framework::make_ddim({batch_size, input_c, input_h, input_w}); framework::make_ddim({batch_size, input_c, input_h, input_w});
std::vector<int64_t> output_shape_v({batch_size, input_c});
output_shape_v.push_back(
PoolOutputSize(input_h, kernel_h, pad_h, stride_h, ceil_mode));
output_shape_v.push_back(
PoolOutputSize(input_w, kernel_w, pad_w, stride_w, ceil_mode));
framework::DDim output_shape = framework::make_ddim(output_shape_v);
VariableNameMap inputs; VariableNameMap inputs;
VariableNameMap outputs; VariableNameMap outputs;
auto scope = std::make_shared<framework::Scope>(); auto scope = std::make_shared<framework::Scope>();
...@@ -130,7 +47,11 @@ int TestPoolOp(int in_channels, int in_height, int in_width) { ...@@ -130,7 +47,11 @@ int TestPoolOp(int in_channels, int in_height, int in_width) {
auto input_var = scope.get()->Var("input"); auto input_var = scope.get()->Var("input");
auto input = input_var->template GetMutable<framework::LoDTensor>(); auto input = input_var->template GetMutable<framework::LoDTensor>();
SetupTensor<T>(input, input_shape, -127, 127); SetupTensor<float>(input, input_shape, -127, 127);
// for (int i = 0; i < input->numel(); ++i) {
// DLOG << "input[" << i << "] = " << input->data<float>()[i];
// }
auto output_var = scope.get()->Var("output"); auto output_var = scope.get()->Var("output");
framework::AttributeMap attrs; framework::AttributeMap attrs;
...@@ -138,7 +59,8 @@ int TestPoolOp(int in_channels, int in_height, int in_width) { ...@@ -138,7 +59,8 @@ int TestPoolOp(int in_channels, int in_height, int in_width) {
attrs["ksize"].Set<vector<int>>(std::vector<int>({kernel_h, kernel_w})); attrs["ksize"].Set<vector<int>>(std::vector<int>({kernel_h, kernel_w}));
attrs["strides"].Set<vector<int>>(std::vector<int>({stride_h, stride_w})); attrs["strides"].Set<vector<int>>(std::vector<int>({stride_h, stride_w}));
attrs["paddings"].Set<vector<int>>(std::vector<int>({pad_h, pad_w})); attrs["paddings"].Set<vector<int>>(std::vector<int>({pad_h, pad_w}));
attrs["ceil_mode"].Set<bool>(false); attrs["ceil_mode"].Set<bool>(true);
// attrs["ceil_mode"].Set<bool>(false);
attrs["global_pooling"].Set<bool>(false); attrs["global_pooling"].Set<bool>(false);
auto *op = new operators::PoolOp<CPU, float>("pool2d", inputs, outputs, attrs, auto *op = new operators::PoolOp<CPU, float>("pool2d", inputs, outputs, attrs,
...@@ -147,43 +69,36 @@ int TestPoolOp(int in_channels, int in_height, int in_width) { ...@@ -147,43 +69,36 @@ int TestPoolOp(int in_channels, int in_height, int in_width) {
op->Init(); op->Init();
op->Run(); op->Run();
auto output = output_var->template Get<framework::LoDTensor>();
framework::Tensor output_cmp; framework::Tensor output_cmp;
output_cmp.mutable_data<T>(output_shape); output_cmp.mutable_data<float>(output->dims());
if (pooling_type == "avg" && pad_h == 0 && pad_h == pad_w) {
PoolAvgPad0<T>(std::vector<int>{kernel_h, kernel_w}, if (pooling_type == "avg") {
std::vector<int>{stride_h, stride_w}, input, &output_cmp); math::Pooling<AVG>()(*input, std::vector<int>{kernel_h, kernel_w},
} else { std::vector<int>{stride_h, stride_w},
if (typeid(T) == typeid(int8_t)) { std::vector<int>{pad_h, pad_w}, &output_cmp);
operators::PoolBasic<int8_t, int32_t>(
pooling_type, std::vector<int>{kernel_h, kernel_w},
std::vector<int>{stride_h, stride_w}, std::vector<int>{pad_h, pad_w},
input, &output_cmp);
} else { } else {
operators::PoolBasic<float, float>( math::Pooling<MAX>()(*input, std::vector<int>{kernel_h, kernel_w},
pooling_type, std::vector<int>{kernel_h, kernel_w}, std::vector<int>{stride_h, stride_w},
std::vector<int>{stride_h, stride_w}, std::vector<int>{pad_h, pad_w}, std::vector<int>{pad_h, pad_w}, &output_cmp);
input, &output_cmp);
}
} }
// compare results // compare results
int eq = 0; const float *output_data = output->data<float>();
int neq = 0; float *output_cmp_data = output_cmp.data<float>();
auto output = output_var->template Get<framework::LoDTensor>();
const T *output_data = output->data<T>();
T *output_cmp_data = output_cmp.data<T>();
for (int i = 0; i < output->numel(); ++i) { for (int i = 0; i < output->numel(); ++i) {
PADDLE_MOBILE_ENFORCE(output_data[i] == output_cmp_data[i], float gap = output_data[i] - output_cmp_data[i];
"The execution of test_pool_op is failed!"); // PADDLE_MOBILE_ENFORCE(output_data[i] == output_cmp_data[i],
if (output_data[i] == output_cmp_data[i]) { // "output[%d] = %d, output_cmp[%d] = %d", i,
++eq; // output_data[i], i, output_cmp_data[i]);
} else { if (gap > 1e-5 && std::abs(gap / (output_data[i] + 1e-5)) > 1e-3) {
++neq; LOG(kLOG_INFO) << "output_data[" << i << "] = " << output_data[i]
<< ", output_cmp_data[" << i
<< "] = " << output_cmp_data[i];
exit(1);
} }
} }
std::cout << "eq = " << eq << ", neq = " << neq << std::endl;
delete op; delete op;
return 0; return 0;
} }
} // namespace paddle_mobile } // namespace paddle_mobile
...@@ -202,91 +117,80 @@ int main(int argc, char *argv[]) { ...@@ -202,91 +117,80 @@ int main(int argc, char *argv[]) {
int in_channels = atoi(argv[1]); int in_channels = atoi(argv[1]);
int in_height = atoi(argv[2]); int in_height = atoi(argv[2]);
int in_width = atoi(argv[3]); int in_width = atoi(argv[3]);
#if __ARM_NEON
// kernel = 3, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "float, ceil_mode=false, pooling_type=max, kernel=3, pad=1, stride=1"; << "float, pooling_type=max, kernel=3, pad=0, stride=1";
paddle_mobile::TestPoolOp<float, 0, 0, 3, 1, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 0, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 0, stride = 2
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "float, ceil_mode=false, pooling_type=max, kernel=3, pad=0, stride=2"; << "float, pooling_type=max, kernel=3, pad=1, stride=1";
paddle_mobile::TestPoolOp<float, 0, 0, 3, 0, 2>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 1, 1>(in_channels, in_height, in_width);
in_width);
#endif
// kernel = 3, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=0, stride=1"; << "float, pooling_type=max, kernel=3, pad=2, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 0, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 2, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 1, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=1, stride=1"; << "float, pooling_type=max, kernel=3, pad=5, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 1, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 5, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 2, stride = 1
LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=2, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 2, 1>(in_channels, in_height,
in_width);
// kernel = 3, pad = 0, stride = 2
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=0, stride=2"; << "float, pooling_type=avg, kernel=3, pad=0, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 0, 2>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 0, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 1, stride = 2
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=1, stride=2"; << "float, pooling_type=avg, kernel=3, pad=1, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 1, 2>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 1, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 0, stride = 2
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=2, stride=2"; << "float, pooling_type=avg, kernel=3, pad=2, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 2, 2>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 2, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 3, stride = 3
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=max, kernel=3, pad=3, stride=3"; << "float, pooling_type=avg, kernel=3, pad=5, stride=1";
paddle_mobile::TestPoolOp<int8_t, 0, 0, 3, 3, 3>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 5, 1>(in_channels, in_height, in_width);
in_width);
// kernel = 7, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=avg, kernel=7, pad=0, stride=1"; << "float, pooling_type=max, kernel=3, pad=0, stride=2";
paddle_mobile::TestPoolOp<int8_t, 0, 1, 7, 0, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 0, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 7, pad = 0, stride = 2
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=avg, kernel=7, pad=0, stride=2"; << "float, pooling_type=max, kernel=3, pad=1, stride=2";
paddle_mobile::TestPoolOp<int8_t, 0, 1, 7, 0, 2>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 1, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 7, pad = 0, stride = 3
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=avg, kernel=7, pad=0, stride=3"; << "float, pooling_type=max, kernel=3, pad=2, stride=2";
paddle_mobile::TestPoolOp<int8_t, 0, 1, 7, 0, 3>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 2, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=avg, kernel=3, pad=0, stride=1"; << "float, pooling_type=max, kernel=3, pad=5, stride=2";
paddle_mobile::TestPoolOp<int8_t, 0, 1, 3, 0, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<0, 3, 5, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 3, pad = 0, stride = 3
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "int8_t, ceil_mode=false, pooling_type=avg, kernel=3, pad=0, stride=3"; << "float, pooling_type=avg, kernel=3, pad=0, stride=2";
paddle_mobile::TestPoolOp<int8_t, 0, 1, 3, 0, 3>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 0, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 7, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "float, ceil_mode=false, pooling_type=avg, kernel=7, pad=0, stride=1"; << "float, pooling_type=avg, kernel=3, pad=1, stride=2";
paddle_mobile::TestPoolOp<float, 0, 1, 7, 0, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 1, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 7, pad = 0, stride = 4
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "float, ceil_mode=false, pooling_type=avg, kernel=7, pad=0, stride=4"; << "float, pooling_type=avg, kernel=3, pad=2, stride=2";
paddle_mobile::TestPoolOp<float, 0, 1, 7, 0, 4>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 2, 2>(in_channels, in_height, in_width);
in_width);
// kernel = 5, pad = 0, stride = 1
LOG(paddle_mobile::kLOG_INFO) LOG(paddle_mobile::kLOG_INFO)
<< "float, ceil_mode=false, pooling_type=avg, kernel=5, pad=0, stride=1"; << "float, pooling_type=avg, kernel=3, pad=5, stride=2";
paddle_mobile::TestPoolOp<float, 0, 1, 5, 0, 1>(in_channels, in_height, paddle_mobile::TestPoolOp<1, 3, 5, 2>(in_channels, in_height, in_width);
in_width);
// // kernel = 5, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO)
// << "float, ceil_mode=false, pooling_type=avg, kernel=5, pad=0,
// stride=1";
// paddle_mobile::TestPoolOp<float, 0, 1, 5, 0, 1>(in_channels, in_height,
// in_width);
// // kernel = 5, pad = 0, stride = 2
// LOG(paddle_mobile::kLOG_INFO)
// << "float, ceil_mode=false, pooling_type=avg, kernel=5, pad=0,
// stride=1";
// paddle_mobile::TestPoolOp<float, 0, 1, 5, 0, 2>(in_channels, in_height,
// in_width);
// // kernel = 7, pad = 0, stride = 1
// LOG(paddle_mobile::kLOG_INFO)
// << "float, ceil_mode=false, pooling_type=avg, kernel=7, pad=0,
// stride=1";
// paddle_mobile::TestPoolOp<float, 0, 1, 7, 0, 1>(in_channels, in_height,
// in_width);
// // kernel = 7, pad = 0, stride = 4
// LOG(paddle_mobile::kLOG_INFO)
// << "float, ceil_mode=false, pooling_type=avg, kernel=7, pad=0,
// stride=4";
// paddle_mobile::TestPoolOp<float, 0, 1, 7, 0, 4>(in_channels, in_height,
// in_width);
} }
...@@ -213,8 +213,6 @@ if(NOT FOUND_MATCH) ...@@ -213,8 +213,6 @@ if(NOT FOUND_MATCH)
set(FUSION_CONVADD_OP ON) set(FUSION_CONVADD_OP ON)
set(FUSION_CONVADDPRELU_OP ON) set(FUSION_CONVADDPRELU_OP ON)
set(FUSION_CONVADDRELU_OP ON) set(FUSION_CONVADDRELU_OP ON)
set(FUSION_CONVADDRELU_INT8_OP ON)
set(FUSION_FC_INT8_OP ON)
set(FUSION_FC_OP ON) set(FUSION_FC_OP ON)
set(LRN_OP ON) set(LRN_OP ON)
set(MUL_OP ON) set(MUL_OP ON)
...@@ -251,9 +249,12 @@ if(NOT FOUND_MATCH) ...@@ -251,9 +249,12 @@ if(NOT FOUND_MATCH)
set(SUM_OP ON) set(SUM_OP ON)
set(QUANT_OP ON) set(QUANT_OP ON)
set(DEQUANT_OP ON) set(DEQUANT_OP ON)
set(FUSION_DEQUANT_BN_OP ON)
set(FUSION_DEQUANT_ADD_BN_OP ON) set(FUSION_DEQUANT_ADD_BN_OP ON)
set(FUSION_DEQUANT_BN_RELU_OP ON) set(FUSION_DEQUANT_BN_RELU_OP ON)
set(FUSION_DEQUANT_ADD_BN_RELU_OP ON) set(FUSION_DEQUANT_ADD_BN_RELU_OP ON)
set(FUSION_DEQUANT_ADD_BN_QUANT_OP ON)
set(FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP ON)
endif() endif()
# option(BATCHNORM_OP "" ON) # option(BATCHNORM_OP "" ON)
...@@ -311,9 +312,6 @@ endif() ...@@ -311,9 +312,6 @@ endif()
if (FUSION_CONVADDRELU_OP) if (FUSION_CONVADDRELU_OP)
add_definitions(-DFUSION_CONVADDRELU_OP) add_definitions(-DFUSION_CONVADDRELU_OP)
endif() endif()
if (FUSION_CONVADDRELU_INT8_OP)
add_definitions(-DFUSION_CONVADDRELU_INT8_OP)
endif()
if (FUSION_CONVADDPRELU_OP) if (FUSION_CONVADDPRELU_OP)
add_definitions(-DFUSION_CONVADDPRELU_OP) add_definitions(-DFUSION_CONVADDPRELU_OP)
endif() endif()
...@@ -323,9 +321,6 @@ endif() ...@@ -323,9 +321,6 @@ endif()
if (FUSION_FC_OP) if (FUSION_FC_OP)
add_definitions(-DFUSION_FC_OP) add_definitions(-DFUSION_FC_OP)
endif() endif()
if(FUSION_FC_INT8_OP)
add_definitions(-DFUSION_FC_INT8_OP)
endif()
if (LRN_OP) if (LRN_OP)
add_definitions(-DLRN_OP) add_definitions(-DLRN_OP)
endif() endif()
...@@ -461,6 +456,9 @@ endif() ...@@ -461,6 +456,9 @@ endif()
if (DEQUANT_OP) if (DEQUANT_OP)
add_definitions(-DDEQUANT_OP) add_definitions(-DDEQUANT_OP)
endif() endif()
if (FUSION_DEQUANT_BN_OP)
add_definitions(-DFUSION_DEQUANT_BN_OP)
endif()
if (FUSION_DEQUANT_ADD_BN_OP) if (FUSION_DEQUANT_ADD_BN_OP)
add_definitions(-DFUSION_DEQUANT_ADD_BN_OP) add_definitions(-DFUSION_DEQUANT_ADD_BN_OP)
endif() endif()
...@@ -470,7 +468,12 @@ endif() ...@@ -470,7 +468,12 @@ endif()
if (FUSION_DEQUANT_ADD_BN_RELU_OP) if (FUSION_DEQUANT_ADD_BN_RELU_OP)
add_definitions(-DFUSION_DEQUANT_ADD_BN_RELU_OP) add_definitions(-DFUSION_DEQUANT_ADD_BN_RELU_OP)
endif() endif()
if (FUSION_DEQUANT_ADD_BN_QUANT_OP)
# add_definitions(-DFUSION_DEQUANT_ADD_BN_QUANT_OP)
endif()
if (FUSION_DEQUANT_ADD_BN_RELU_QUANT_OP)
# add_definitions(-DFUSION_DEQUANT_ADD_BN_RELU_QUANT_OP)
endif()
if (TANH_OP) if (TANH_OP)
add_definitions(-DTANH_OP) add_definitions(-DTANH_OP)
...@@ -484,4 +487,3 @@ endif() ...@@ -484,4 +487,3 @@ endif()
if (FUSION_DECONVADDRELU_OP) if (FUSION_DECONVADDRELU_OP)
add_definitions(-DFUSION_DECONVADDRELU_OP) add_definitions(-DFUSION_DECONVADDRELU_OP)
endif() endif()