Commit 1a9928a9 authored by MyPandaShaoxiang

style: style fix

test=develop
Parent 1893489c
File mode changed from 100755 to 100644
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 #include <unistd.h>
-// #include <chrono>
 #include <iostream>
 #include <string>
 #include <vector>
@@ -84,7 +83,6 @@ void GRUCompute::PrepareForRun() {
 void GRUCompute::Run() {
   auto& param = this->Param<param_t>();
   param.hidden->mutable_data<float>();
-  // auto& ctx = this->ctx_->template As<ARMContext>();
   // inputs
   auto input = param.input;
   auto h0 = param.h0;
@@ -106,8 +104,6 @@ void GRUCompute::Run() {
   lite::arm::math::LoDTensor2BatchFunctor<float> to_batch;
   to_batch(*input, batch_gate, true, param.is_reverse);  // 1.
-  save_tensor(batch_gate, "_batch_gate.txt");
-
   if (bias) {
     auto bias_data = bias->data<float>();  // 2.
     lite::arm::math::gru_add_with_bias(batch_gate_data,
@@ -115,9 +111,6 @@ void GRUCompute::Run() {
                                        batch_gate_data,
                                        batch_size,
                                        frame_size * 3);
-    // save_tensor(const_cast<Tensor*>(bias), "_bias.txt");
-    save_tensor(batch_gate, "_after_bias.txt");
-    std::cout << "================= bias =================\n";
   }

   zynqmp::GRUTensors gru_tensors;
@@ -137,7 +130,6 @@ void GRUCompute::Run() {
     // //3.
     gru_value.prev_out_value = ordered_h0.mutable_data<float>();
     gru_tensors.pre_output = ordered_h0.ZynqTensor();
-    std::cout << "================= h0 =================\n";
   } else {
     gru_value.prev_out_value = nullptr;
     gru_tensors.pre_output = nullptr;
@@ -153,9 +145,6 @@ void GRUCompute::Run() {
   zynqmp::Tensor float_input;
   zynqmp::Tensor hidden_out;
-  std::cout << "seq_len::" << seq_len << std::endl;
-  // exit(-1);
-
   for (size_t n = 0; n < seq_len; n++) {
     int bstart = static_cast<int>(batch_starts[n]);
     int bend = static_cast<int>(batch_starts[n + 1]);
@@ -180,9 +169,6 @@ void GRUCompute::Run() {
     float* hidden_data =
         hidden_out.mutableData<float>(zynqmp::FP32, float_input_shape);
-    // memcpy(hidden_prev_data, )
-    // zynqmp::Tensor* gate = pe_.gate();
     gru_tensors.gate = &float_input;
     gru_tensors.output = &hidden_out;
@@ -196,7 +182,6 @@ void GRUCompute::Run() {
     // TODO(chonwhite): copy data back to original tensor;
     gru_tensors.pre_output = gru_tensors.output;
-    // gru_value.prev_out_value = gru_value.output_value;
   }
   lite::arm::math::Batch2LoDTensorFunctor<float> to_seq;  // 5.
   *(batch_hidden->mutable_lod()) = batch_gate->lod();
...
@@ -46,7 +46,6 @@ class GRUCompute
   zynqmp::ElementwiseAddPE bias_ew_pe_;
   zynqmp::FullyConnectedPE pre_out_pe_;
   zynqmp::FullyConnectedPE reset_out_pe_;
-  // zynqmp::Tensor input_;
   zynqmp::GRUPE pe_;
 };
...
@@ -118,9 +118,6 @@ class IoCopyFpgaToHostCompute
     param.y->ZynqTensor()->flush();
     auto out_lod = param.y->mutable_lod();
     *out_lod = param.x->lod();
-    // param.x->ZynqTensor()->saveToFile("io_x", true);
-    // param.y->ZynqTensor()->saveToFile("io_y", true);
   }
   std::string doc() const override { return "Copy IO from FPGA to HOST"; }
...
@@ -78,7 +78,6 @@ void PriorBoxCompute::PrepareForRun() {
   param.boxes->mutable_data<float>();
   param.variances->mutable_data<float>();
-  // ====================================================
   zynqmp::PriorBoxParam& priobox_param = pe_.param();
   priobox_param.input = param.input->ZynqTensor();
   priobox_param.image = param.image->ZynqTensor();
@@ -132,19 +131,3 @@ REGISTER_LITE_KERNEL(prior_box,
     .BindOutput("Boxes", {LiteType::GetTensorTy(TARGET(kARM))})
     .BindOutput("Variances", {LiteType::GetTensorTy(TARGET(kARM))})
     .Finalize();
-
-// REGISTER_LITE_KERNEL(prior_box,
-//                      kFPGA,
-//                      kFP16,
-//                      kNHWC,
-//                      paddle::lite::kernels::fpga::PriorBoxCompute,
-//                      def)
-//     .BindInput("Input", {LiteType::GetTensorTy(TARGET(kFPGA),
-//                                                PRECISION(kFP16),
-//                                                DATALAYOUT(kNHWC))})
-//     .BindInput("Image", {LiteType::GetTensorTy(TARGET(kFPGA),
-//                                                PRECISION(kFP16),
-//                                                DATALAYOUT(kNHWC))})
-//     .BindOutput("Boxes", {LiteType::GetTensorTy(TARGET(kARM))})
-//     .BindOutput("Variances", {LiteType::GetTensorTy(TARGET(kARM))})
-//     .Finalize();
@@ -53,33 +53,6 @@ void ReshapeCompute::Run() {
   output->Resize(output_dims);
 }

-// void ReshapeComputeFpgaToHost::Run() {
-//   auto& param = Param<operators::ReshapeParam>();
-//   param.output->mutable_data<float>();
-//   auto x = param.x;
-//   // auto actual_shape = param.actual_shape;
-//   Tensor* actual_shape = nullptr;  // TODO(chonwhite) change it.
-//   auto output = param.output;
-//   bool inplace = param.inplace;
-//   auto x_dims = x->dims();
-//   auto output_dims = output->dims();
-//   if (actual_shape) {
-//     auto actual_shape_dims = actual_shape->dims();
-//     auto* actual_shape_data = actual_shape->data<int>();
-//     auto shape = std::vector<int>(
-//         actual_shape_data, actual_shape_data +
-//         actual_shape_dims.production());
-//     output_dims = lite::operators::ValidateShape(shape, x_dims);
-//     output->Resize(output_dims);
-//   }
-//   if (inplace) {
-//     output->ShareDataWith(*x);
-//   } else {
-//     output->CopyDataFrom(*x);
-//   }
-//   output->Resize(output_dims);
-// }
-
 } // namespace fpga
 } // namespace kernels
 } // namespace lite
...
@@ -39,11 +39,8 @@ void transposeCompute(operators::TransposeParam param) {
   float_input.mutable_data<float>();
   float_input.ZynqTensor()->copyFrom(input_x->ZynqTensor());
-  // const auto* input_x_data = input_x->data<float>();
   const auto* input_x_data = float_input.data<float>();
-  // auto& param = this->Param<param_t>();
   auto* out = param.output;
   const auto axis = param.axis;
@@ -84,10 +81,7 @@ void transposeCompute(operators::TransposeParam param) {
 }

 // Transpose
-void TransposeCompute::Run() {
-  auto& param = this->Param<param_t>();
-  // param.output->mutable_data<float16>();
-}
+void TransposeCompute::Run() { auto& param = this->Param<param_t>(); }

 // Transpose2
 void Transpose2Compute::Run() {
@@ -97,25 +91,8 @@ void Transpose2Compute::Run() {
   param.x->ZynqTensor()->unalignImage();
   if (param.x->dims().size() != 4) {
     transposeCompute(param);
-    // auto out = param.Out();
-    // auto out_data = out->data<half>();
-    // int num = input_x_dims[1];
-    // int channel = input_x_dims[2];
-    // int index = 0;
-    // for (int n = 0; n < num; n++) {
-    //   for (int c = 0; c < channel; c++) {
-    //     out_data[c * num + n] = input_x_data[n * channel + c];
-    //     index++;
-    //   }
-    // }
-    // param.output->ZynqTensor()->copyFrom(param.x->ZynqTensor());
   } else {
-    param.x->ZynqTensor()->saveToFile("tx", true);
     param.output->ZynqTensor()->copyFrom(param.x->ZynqTensor());
-    param.output->ZynqTensor()->saveToFile("to", true);
   }
 }
...