Commit 32177924 authored by MyPandaShaoxiang

fix:delete useless code

Parent 9c15846a
@@ -104,10 +104,3 @@ metal/paddle-mobile-demo/paddle-mobile-demo/Resources
 metal/paddle-mobile-demo/paddle-mobile-demo/Resources/images
 metal/paddle-mobile-demo/paddle-mobile-demo/Resources/models
 metal/MobileNetDemo/MobileNetDemo/Resources
-# generated files
-lite/api/paddle_use_kernels.h
-lite/api/paddle_use_ops.h
-lite/backends/arm/math/dotprod/gemm_sdot.h
-lite/tools/cmake_tools/ast.pyc
@@ -22,8 +22,6 @@ if (WITH_PADDLE_MOBILE)
   return()
 endif(WITH_PADDLE_MOBILE)
-# set(CMAKE_BUILD_TYPE DEBUG)
 set(PADDLE_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
 set(PADDLE_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
 set(CMAKE_CXX_STANDARD 11)
...
./lite/tools/build.sh \
--arm_os=armlinux \
--arm_abi=armv8 \
--arm_lang=gcc \
test
@@ -221,7 +221,6 @@ int8_t* format_filter(float* data_in,
       align_to_x(num_per_div_before_alignment, filter_num_alignment);
   int div_num =
       (num + num_per_div_before_alignment - 1) / num_per_div_before_alignment;
-  // int num_after_alignment = num_per_div_after_alignment * div_num;
   int residual = num % num_per_div_before_alignment;
   int num_after_alignment = num_per_div_after_alignment *
                                 ((residual == 0) ? div_num : (div_num - 1)) +
...
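Note: the div_num expression above is the usual ceiling-division idiom, and residual is the leftover size of the last group. A minimal self-contained sketch with hypothetical filter counts (the names mirror the diff, the numbers are made up):

#include <cassert>

// Ceiling division: smallest k with k * divisor >= num.
static int ceil_div(int num, int divisor) {
  return (num + divisor - 1) / divisor;
}

int main() {
  int num = 100;                          // hypothetical filter count
  int num_per_div_before_alignment = 32;  // hypothetical group size
  int div_num = ceil_div(num, num_per_div_before_alignment);
  int residual = num % num_per_div_before_alignment;
  assert(div_num == 4);   // 100 filters need 4 groups of 32
  assert(residual == 4);  // the last group holds only 4 filters
  return 0;
}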
File mode changed from 100755 to 100644
@@ -62,7 +62,6 @@ void reset_device() {
 // memory management;
 void *fpga_malloc(size_t size) {
 #ifdef ENABLE_DEBUG
-  std::cout << "fpga_malloc:" << size << std::endl;
 #endif
 #ifdef PADDLE_OS_LINUX
   void *ptr = reinterpret_cast<void *>(
...
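Note: the deleted line was a debug trace gated by ENABLE_DEBUG, leaving the guard block empty after this change. A minimal sketch of that compile-time logging pattern (my_fpga_malloc is a hypothetical stand-in; the real function maps FPGA memory on Linux):

#include <cstdio>
#include <cstdlib>

void *my_fpga_malloc(std::size_t size) {
#ifdef ENABLE_DEBUG
  std::printf("fpga_malloc: %zu\n", size);  // trace only in debug builds
#endif
  return std::malloc(size);  // placeholder; the real code goes through the FPGA driver
}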
@@ -364,7 +364,6 @@ inline void split_filter_num(const ConvParam& c_param) {
   args.image.height = input->shape().height();
   args.image.pad_width = param.paddings[1];
   args.image.pad_height = param.paddings[0];
   args.dilation = param.dilations[0];
   args.output.address = out_address;
@@ -420,7 +419,6 @@ inline void split_channel(const ConvParam& c_param) {
   }
   scale.flush();
   bias.flush();
-  // Shape sb_shape(N, {2 * channel});
   format_scale_bias(&scale,
                     &bias,
                     &conv_param->filter,
@@ -448,7 +446,6 @@ inline void split_channel(const ConvParam& c_param) {
   args.image.height = conv_param->input.shape().height();
   args.image.pad_width = param.paddings[1];
   args.image.pad_height = param.paddings[0];
   args.dilation = param.dilations[0];
   args.output.address = conv_param->output.mutableData<void>();
   args.output.scale_address = conv_param->output.scale();
@@ -479,7 +476,6 @@ inline bool compute_conv(const ConvParam& c_conv_params) {
   }
   size_t size = params.size();
   if (ret == 0 && size > 1) {
-    // Tensor* output = conv_params.output;
     Tensor& img = params[0]->output;
     for (int i = 0; i < 1; i++) {
       for (int i = 0; i < img.shape().numel(); i++) {
...
@@ -62,7 +62,6 @@ class DepthwiseConvPE : public PE {
   float16* scale_data = param_.scale()->data<float16>();
   float16* filter_data = param.quantizedFilter()->mutableData<float16>(
       FP16, param.filter->shape());
   memcpy(filter_data,
          scale_data,
          param.filter->shape().numel() * sizeof(float16));
...
File mode changed from 100755 to 100644
@@ -119,7 +119,6 @@ class GRUPE : public PE {
     prev_hidden_.copyFrom(value.pre_output);
   }
   mul_pe_.dispatch();
-  // reset_hidden_.saveToFile("reset_hidden_.txt");
   update_gate_data += stride_update;
   reset_gate_data += stride_update;
@@ -170,7 +169,6 @@ class GRUPE : public PE {
   zynqmp::Tensor bias_;
   zynqmp::Tensor weight_;
   zynqmp::Tensor state_weight_;
   zynqmp::Tensor update_gate_;
   zynqmp::Tensor reset_gate_;
   zynqmp::Tensor cell_state_;
...
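Note: the two += lines in the first hunk step the gate pointers through one contiguous buffer, one time step at a time. A minimal sketch of that striding idiom with made-up sizes:

#include <vector>

int main() {
  const int steps = 3, stride_update = 4;  // hypothetical sizes
  std::vector<float> gates(steps * stride_update, 0.0f);
  float* update_gate_data = gates.data();
  for (int t = 0; t < steps; ++t) {
    // ... consume update_gate_data[0 .. stride_update) for step t ...
    update_gate_data += stride_update;  // advance to the next step's gates
  }
  return 0;
}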
@@ -348,19 +348,9 @@ class Tensor {
     if (placeHolder_ == nullptr) {
       return;
     }
-    std::cout << scale()[0] << " , " << scale()[1] << std::endl;
   }
-  void printScale(std::string type) {
-    std::cout << type << " : "
-              << std::to_string(shape_->num()) + "_" +
-                     std::to_string(shape_->channel()) + "_" +
-                     std::to_string(shape_->height()) + "_" +
-                     std::to_string(shape_->width())
-              << std::endl;
-    std::cout << type << " \n";
-    printScale();
-  }
+  void printScale(std::string type) { printScale(); }
   std::string dimsFileName() {
     return std::to_string(shape_->num()) + "_" +
@@ -388,7 +378,6 @@ class Tensor {
   static int counter = 0;
   std::string npath = std::to_string(counter) + "_" + path;
   counter++;
-  std::cout << "======== saving file:" << npath << " ============\n";
   save_file_with_name(npath);
 }
...
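Note: the surviving counter logic in the second hunk gives each dump a unique, ordered file name. A self-contained sketch of that scheme (next_dump_path is a hypothetical helper name):

#include <string>

std::string next_dump_path(const std::string& path) {
  static int counter = 0;  // persists across calls, so names never repeat
  return std::to_string(counter++) + "_" + path;
}
// next_dump_path("scale.txt") -> "0_scale.txt", then "1_scale.txt", ...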
@@ -165,9 +165,6 @@ class TensorLite {
   TargetType target() const { return target_; }
-  // template <typename T>
-  // TensorLite Slice(int64_t begin, int64_t end) const;
   zynqmp::Tensor *ZynqTensor() const { return zynq_tensor_; }
   friend std::ostream &operator<<(std::ostream &os, const TensorLite &tensor) {
@@ -257,7 +254,6 @@ TensorLite TensorLite::Slice(int64_t begin, int64_t end) const {
   int64_t base = numel() / dims_[0];
   TensorLite dst;
   dst.target_ = target_;
   auto dst_dims = dims_;
   dst_dims[0] = end - begin;
...
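Note: Slice works along dimension 0: a tensor with dims [d0, d1, ...] is treated as d0 rows of base = numel / d0 elements, and rows [begin, end) are kept. A worked sketch with hypothetical numbers:

#include <cassert>
#include <cstdint>

int main() {
  int64_t d0 = 6, numel = 6 * 4;       // hypothetical dims {6, 4}
  int64_t base = numel / d0;           // 4 elements per row along dim 0
  int64_t begin = 2, end = 5;          // keep rows [2, 5)
  assert(begin * base == 8);           // element offset of the slice
  assert((end - begin) * base == 12);  // elements in the sliced tensor
  return 0;
}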
File mode changed from 100755 to 100644
@@ -146,7 +146,7 @@ void RuntimeProgram::Run() {
 #ifdef LITE_WITH_PROFILE
 #ifdef LITE_WITH_PRECISION_PROFILE
 #ifndef LITE_WITH_FPGA
-    // LITE_PRECISION_PROFILE(inst)
+    LITE_PRECISION_PROFILE(inst)
 #endif
 #endif  // LITE_WITH_PRECISION_PROFILE
 #endif  // LITE_WITH_PROFILE
...
@@ -28,7 +28,6 @@ namespace arm {
 void LookupTableCompute::Run() {
   auto& param = this->Param<param_t>();
-  auto& ctx = this->ctx_->template As<ARMContext>();
   // inputs
   auto w = param.W;
   auto ids = param.Ids;
@@ -76,3 +75,13 @@ REGISTER_LITE_KERNEL(lookup_table,
     .BindInput("Ids", {LiteType::GetTensorTy(TARGET(kARM))})
     .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
     .Finalize();
+REGISTER_LITE_KERNEL(lookup_table_v2,
+                     kARM,
+                     kFloat,
+                     kNCHW,
+                     paddle::lite::kernels::arm::LookupTableCompute,
+                     def)
+    .BindInput("W", {LiteType::GetTensorTy(TARGET(kARM))})
+    .BindInput("Ids", {LiteType::GetTensorTy(TARGET(kARM))})
+    .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kARM))})
+    .Finalize();
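Note: the added block registers the existing LookupTableCompute under a second op name, lookup_table_v2, so both ops resolve to the same kernel. A simplified, hypothetical sketch of that one-class-many-names registry idea (not Paddle-Lite's actual machinery):

#include <functional>
#include <iostream>
#include <map>
#include <string>

std::map<std::string, std::function<void()>> g_kernels;  // toy registry

int main() {
  auto lookup_table_kernel = [] { std::cout << "LookupTableCompute::Run\n"; };
  g_kernels["lookup_table"] = lookup_table_kernel;
  g_kernels["lookup_table_v2"] = lookup_table_kernel;  // same compute class
  g_kernels["lookup_table_v2"]();  // both names dispatch to one kernel
  return 0;
}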
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
@@ -191,8 +191,6 @@ class IoCopyFpgaToHostCHWCompute
   param.y->ZynqTensor()->flush();
   auto out_lod = param.y->mutable_lod();
   *out_lod = param.x->lod();
-  // param.x->ZynqTensor()->saveToFile("io_x", true);
-  // param.y->ZynqTensor()->saveToFile("io_y", true);
 }
 std::string doc() const override { return "Copy IO from FPGA to HOST"; }
};
...
@@ -78,7 +78,6 @@ void PriorBoxCompute::PrepareForRun() {
   param.boxes->mutable_data<float>();
   param.variances->mutable_data<float>();
   zynqmp::PriorBoxParam& priobox_param = pe_.param();
   priobox_param.input = param.input->ZynqTensor();
   priobox_param.image = param.image->ZynqTensor();
...
File mode changed from 100755 to 100644
File mode changed from 100755 to 100644
@@ -63,26 +63,6 @@ REGISTER_LITE_KERNEL(reshape,
                                       DATALAYOUT(kAny))})
     .Finalize();
-// REGISTER_LITE_KERNEL(reshape,
-//                      kFPGA,
-//                      kFP16,
-//                      kNHWC,
-//                      paddle::lite::kernels::host::ReshapeCompute,
-//                      def)
-//     .BindInput("X",
-//                {LiteType::GetTensorTy(
-//                    TARGET(kFPGA), PRECISION(kFP16), DATALAYOUT(kNHWC))})
-//     .BindInput("ShapeTensor",
-//                {LiteType::GetTensorTy(
-//                    TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny))})
-//     .BindInput("Shape",
-//                {LiteType::GetTensorTy(
-//                    TARGET(kHost), PRECISION(kAny), DATALAYOUT(kAny))})
-//     .BindOutput("Out",
-//                 {LiteType::GetTensorTy(
-//                     TARGET(kHost), PRECISION(kFloat), DATALAYOUT(kNCHW))})
-//     .Finalize();
 REGISTER_LITE_KERNEL(reshape2,
                      kHost,
                      kAny,
...
@@ -13,7 +13,7 @@ readonly NUM_PROC=${LITE_BUILD_THREADS:-4}
 # global variables
-BUILD_EXTRA=ON
+BUILD_EXTRA=OFF
 BUILD_JAVA=ON
 BUILD_PYTHON=OFF
 BUILD_DIR=$(pwd)
...