From 45b3766fdf2506c81312c3446ad978f385f69c13 Mon Sep 17 00:00:00 2001
From: nhzlx
Date: Tue, 26 Mar 2019 03:34:46 +0000
Subject: [PATCH] fix comments test=develop

---
 .../inference/anakin/convert/test_fc_op.cc    |  2 --
 paddle/fluid/inference/anakin/engine.cc       |  8 -----
 .../inference/api/details/zero_copy_tensor.cc |  2 +-
 paddle/fluid/inference/api/paddle_api.h       |  2 +-
 .../fluid/operators/anakin/anakin_engine_op.h | 36 +------------------
 5 files changed, 3 insertions(+), 47 deletions(-)

diff --git a/paddle/fluid/inference/anakin/convert/test_fc_op.cc b/paddle/fluid/inference/anakin/convert/test_fc_op.cc
index 87bce36403..ee6d1dc291 100644
--- a/paddle/fluid/inference/anakin/convert/test_fc_op.cc
+++ b/paddle/fluid/inference/anakin/convert/test_fc_op.cc
@@ -37,8 +37,6 @@ TEST(fc_op, test) {
   desc.SetInput("X", {"mul_x"});
   desc.SetInput("Y", {"mul_y"});
   desc.SetOutput("Out", {"mul_out"});
-  // int num_flatten_dims = 3;
-  // desc.SetAttr("x_num_col_dims", num_flatten_dims);
 
   validator.SetOp(*desc.Proto());
   validator.Execute(10);
diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc
index 39be786514..ccf78ad7e5 100644
--- a/paddle/fluid/inference/anakin/engine.cc
+++ b/paddle/fluid/inference/anakin/engine.cc
@@ -90,14 +90,6 @@ void AnakinEngine::Execute(
                    "The anakin input max shape should be greater than"
                    " or equal to the real input shape, Please set the max "
                    "input shape using EnableAnakinEngine");
-    /*
-    if (tensor->numel() > net_shape.count()) {
-      graph_->Reshape(input.first, fluid_input_shape);
-      net_.reset(new AnakinNetT(true));
-      net_->init(*graph_);
-      anakin_input = net_->get_in(input.first);
-    }
-    */
     anakin_input->reshape(fluid_input_shape);
 
     ::anakin::saber::Tensor tmp_anakin_tensor(data, TargetT(), 0,
diff --git a/paddle/fluid/inference/api/details/zero_copy_tensor.cc b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
index d2e2a98a85..937b6398f8 100644
--- a/paddle/fluid/inference/api/details/zero_copy_tensor.cc
+++ b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -74,7 +74,7 @@ T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const {
   return res;
 }
 
-PaddleDType ZeroCopyTensor::type() {
+PaddleDType ZeroCopyTensor::type() const {
   EAGER_GET_TENSOR;
   auto type = tensor->type();
   if (type == framework::proto::VarType::FP32) {
diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h
index 52c5cb34b5..87f40f09eb 100644
--- a/paddle/fluid/inference/api/paddle_api.h
+++ b/paddle/fluid/inference/api/paddle_api.h
@@ -177,7 +177,7 @@ class ZeroCopyTensor {
     device_ = device;
   }
 
-  PaddleDType type();
+  PaddleDType type() const;
 
  protected:
   explicit ZeroCopyTensor(void* scope) : scope_{scope} {}
diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.h b/paddle/fluid/operators/anakin/anakin_engine_op.h
index 5da3cc1777..9d5b4f6f54 100644
--- a/paddle/fluid/operators/anakin/anakin_engine_op.h
+++ b/paddle/fluid/operators/anakin/anakin_engine_op.h
@@ -120,41 +120,7 @@ class AnakinEngineOp : public framework::OperatorBase {
           inference::Singleton::Global()
               .Get(engine_key_);
     }
-    // BUG here, detect that the tensor data pointer here will change sometime.
-    // Will fix it later.
-    /*
-    // For share with the tensor from fluid, We do the net init in the first net
-    precit.
-    if (!anakin_engine_->IsInit()) {
-      auto temp_max_input_shape = anakin_engine_->GetMaxInputShape();
-      anakin_engine_->AllocTmpMem();
-      for(auto& input : Inputs("Xs")) {
-        if (param_names_.count(input)) continue;
-        platform::CUDAPlace
-        gpu_place(boost::get(dev_place).device);
-        auto *input_var = scope.FindVar(input);
-        auto input_tensor = input_var->GetMutable();
-        auto input_max_shape = temp_max_input_shape[input];
-
-        framework::LoDTensor temp_t;
-        auto t_dims = input_tensor->dims();
-        temp_t.Resize(t_dims);
-        TensorCopySync(*input_tensor, dev_place, &temp_t);
-        input_tensor->Resize(framework::make_ddim(input_max_shape));
-        input_tensor->mutable_data(dev_place);
-        TensorCopySync(temp_t, dev_place, input_tensor);
-
-        auto* input_data = input_tensor->mutable_data(gpu_place);
-        auto* anakin_input = anakin_engine_->Net()->get_in(input);
-
-        ::anakin::saber::Tensor<::anakin::saber::NV>
-        tmp_anakin_tensor(input_data,
-        ::anakin::saber::NV(), 0, input_max_shape);
-        anakin_input->share_from(tmp_anakin_tensor);
-      }
-      anakin_engine_->InitGraph();
-    }
-    */
+
     return anakin_engine_;
   }
 
-- 
GitLab