Commit d6b73958 authored by qnqinan, committed by GitHub

Merge pull request #786 from chonwhite/develop

fix:#785
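This pull request is a rename cleanup of the FPGA backend: `fpga/api/fpga_api.h` becomes `fpga/api.h`, the misspelled `fpga/fpga_quantilization.h` becomes `fpga/quantization.h`, and `fpga::quantify_filter` becomes `fpga::quantize_filter`. Below is a minimal sketch of what a caller looks like after the rename; the helper function itself is hypothetical, only the include paths and the `quantize_filter(framework::Tensor*)` signature come from this diff.

```cpp
#include <cstdint>

#include "fpga/api.h"           // was: "fpga/api/fpga_api.h"
#include "fpga/quantization.h"  // was: "fpga/fpga_quantilization.h"
#include "framework/tensor.h"

namespace paddle_mobile {

// Hypothetical helper, for illustration only: quantize a filter tensor with
// the renamed API and read back the resulting int8 data, as the FPGA kernels
// in this diff do.
void QuantizeFilterExample(framework::Tensor *filter) {
  fpga::quantize_filter(filter);  // was: fpga::quantify_filter(filter)
  auto *filter_ptr = filter->data<int8_t>();
  (void)filter_ptr;  // the real kernels hand this pointer to fpga::ConvArgs
}

}  // namespace paddle_mobile
```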
@@ -27,7 +27,7 @@ limitations under the License. */
 #include <cstdio>
 #include <cstring>
-#include "fpga/api/fpga_api.h"
+#include "api.h"
 namespace paddle_mobile {
 namespace fpga {
......
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "fpga/fpga_quantilization.h"
+#include "fpga/quantization.h"
 #include <algorithm>
 namespace paddle_mobile {
@@ -47,7 +47,7 @@ static Dtype find_max(Dtype* data, int64_t num) {
 }
 // template <typename Dtype>
-void quantify_filter(framework::Tensor* filter) {
+void quantize_filter(framework::Tensor* filter) {
   DLOG << "quantilize_filter........";
   float scale = 0;
......
@@ -24,7 +24,7 @@ template <typename Dtype>
 static void chw_to_hwc(Dtype* data_in, Dtype* data_out, int64_t num,
                        int64_t channel, int64_t height, int64_t width);
-void quantify_filter(framework::Tensor* filter);
+void quantize_filter(framework::Tensor* filter);
 }  // namespace fpga
 }  // namespace paddle_mobile
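The header hunk above only renames the declaration. Judging from the context visible in the .cpp hunk (a `find_max` helper and a `scale` variable) and from the kernels reading `filter->data<int8_t>()` right after the call, `quantize_filter` presumably rescales a float filter into int8 in place. The following standalone sketch shows that idea under the assumption of symmetric max-abs scaling; the real implementation may differ (layout conversion via `chw_to_hwc`, scale bookkeeping, etc.).

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Largest absolute value in the filter, in the spirit of the find_max helper
// visible in the hunk above.
static float find_abs_max(const float* data, int64_t num) {
  float max = 0.f;
  for (int64_t i = 0; i < num; ++i) max = std::max(max, std::fabs(data[i]));
  return max;
}

// Presumed symmetric quantization: map [-max, max] onto the int8 range.
static std::vector<int8_t> quantize_to_int8(const float* data, int64_t num) {
  float scale = find_abs_max(data, num) / 127.f;
  if (scale == 0.f) scale = 1.f;  // all-zero filter: avoid dividing by zero
  std::vector<int8_t> out(static_cast<size_t>(num));
  for (int64_t i = 0; i < num; ++i) {
    out[static_cast<size_t>(i)] =
        static_cast<int8_t>(std::round(data[i] / scale));
  }
  return out;
}
```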
@@ -18,7 +18,7 @@ limitations under the License. */
 #ifdef PADDLE_MOBILE_FPGA
-#include "fpga/api/fpga_api.h"
+#include "fpga/api.h"
 #endif
......
@@ -15,8 +15,8 @@ limitations under the License. */
 #ifdef FUSION_CONVADDBN_OP
 #include "operators/kernel/conv_add_bn_kernel.h"
-#include "fpga/api/fpga_api.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/api.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -60,7 +60,7 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam *param) {
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantify_filter(filter);
+  fpga::quantize_filter(filter);
   auto filter_ptr = filter->data<int8_t>();
   fpga::ConvArgs convArgs;
......
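Each fused conv/BN kernel in this diff records `new_scale` and `new_bias` via `param->SetNewScale(...)` / `param->SetNewBias(...)` before quantizing the filter. The folding computation itself is not part of this change; the usual way batch-norm parameters collapse into a per-channel scale and bias looks roughly like the sketch below (the names and the exact bias handling are assumptions, not taken from this diff).

```cpp
#include <cmath>
#include <vector>

struct FoldedBN {
  std::vector<float> new_scale;
  std::vector<float> new_bias;
};

// Fold batch-norm parameters (gamma, beta, mean, variance, epsilon) into a
// per-channel scale/bias pair that can be applied after the convolution.
FoldedBN FoldBatchNorm(const std::vector<float>& gamma,
                       const std::vector<float>& beta,
                       const std::vector<float>& mean,
                       const std::vector<float>& variance, float epsilon) {
  FoldedBN out;
  out.new_scale.resize(gamma.size());
  out.new_bias.resize(gamma.size());
  for (size_t i = 0; i < gamma.size(); ++i) {
    out.new_scale[i] = gamma[i] / std::sqrt(variance[i] + epsilon);
    out.new_bias[i] = beta[i] - mean[i] * out.new_scale[i];
  }
  return out;
}
```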
@@ -15,7 +15,7 @@ limitations under the License. */
 #ifdef FUSION_CONVADDBNRELU_OP
 #include "operators/kernel/conv_add_bn_relu_kernel.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -56,7 +56,7 @@ bool ConvAddBNReluKernel<FPGA, float>::Init(FusionConvAddBNReluParam *param) {
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantify_filter(filter);
+  fpga::quantize_filter(filter);
   auto filter_ptr = filter->data<int8_t>();
   fpga::ConvArgs convArgs;
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #ifdef FUSION_CONVADDRELU_OP
 #include "operators/kernel/conv_add_relu_kernel.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -40,7 +40,7 @@ bool ConvAddReluKernel<FPGA, float>::Init(FusionConvAddReluParam *param) {
     bs_ptr[i * 2 + 1] = bias_ptr[i];
   }
-  fpga::quantify_filter(filter);
+  fpga::quantize_filter(filter);
   auto filter_ptr = filter->data<int8_t>();
   fpga::ConvArgs convArgs;
......
@@ -15,8 +15,8 @@ limitations under the License. */
 #ifdef FUSION_CONVBN_OP
 #include "operators/kernel/conv_bn_kernel.h"
-#include "fpga/api/fpga_api.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/api.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -55,7 +55,7 @@ bool ConvBNKernel<FPGA, float>::Init(FusionConvBNParam *param) {
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantify_filter(filter);
+  fpga::quantize_filter(filter);
   auto filter_ptr = filter->data<int8_t>();
   fpga::ConvArgs convArgs;
......
@@ -15,7 +15,7 @@ limitations under the License. */
 #ifdef FUSION_CONVBNRELU_OP
 #include "operators/kernel/conv_bn_relu_kernel.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -52,7 +52,7 @@ bool ConvBNReluKernel<FPGA, float>::Init(FusionConvBNReluParam *param) {
   }
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  fpga::quantify_filter(filter);
+  fpga::quantize_filter(filter);
   auto filter_ptr = filter->data<int8_t>();
   fpga::ConvArgs convArgs;
......
@@ -13,8 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #ifdef FUSION_FCRELU_OP
 #include "operators/kernel/fc_relu_kernel.h"
-#include "fpga/api/fpga_api.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/api.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -39,7 +40,7 @@ bool FusionFcReluKernel<FPGA, float>::Init(FusionFcReluParam *param) {
     bs_ptr[i * 2 + 1] = input_z_ptr[i];
   }
-  fpga::quantify_filter(input_y);
+  fpga::quantize_filter(input_y);
   auto input_y_ptr = input_y->data<int8_t>();
   fpga::ConvArgs convArgs;
......
@@ -14,7 +14,7 @@ limitations under the License. */
 #ifdef FUSION_FC_OP
 #include "operators/kernel/fusion_fc_kernel.h"
-#include "fpga/fpga_quantilization.h"
+#include "fpga/quantization.h"
 namespace paddle_mobile {
 namespace operators {
@@ -39,7 +39,7 @@ bool FusionFcKernel<FPGA, float>::Init(FusionFcParam *param) {
     bs_ptr[i * 2 + 1] = input_z_ptr[i];
   }
-  fpga::quantify_filter(input_y);
+  fpga::quantize_filter(input_y);
   auto input_y_ptr = input_y->data<int8_t>();
   fpga::ConvArgs convArgs;
......
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "../softmax_kernel.h"
 #include "../central-arm-func/softmax_arm_func.h"
 #include "common/types.h"
-#include "fpga/api/fpga_api.h"
+#include "fpga/api.h"
 #include "operators/math/softmax.h"
 namespace paddle_mobile {
 namespace operators {
......
@@ -23,7 +23,7 @@ limitations under the License. */
 #include "framework/tensor.h"
 #include "framework/variable.h"
 #ifdef PADDLE_MOBILE_FPGA
-#include "fpga/api/fpga_api.h"
+#include "fpga/api.h"
 #endif
 namespace paddle_mobile {
......