diff --git a/paddle/fluid/inference/anakin/convert/test_fc_op.cc b/paddle/fluid/inference/anakin/convert/test_fc_op.cc
index 87bce36403fab59fb38697478ed68ff8e68fed7a..ee6d1dc291fe3733ff2e9f66dd453120fa266a55 100644
--- a/paddle/fluid/inference/anakin/convert/test_fc_op.cc
+++ b/paddle/fluid/inference/anakin/convert/test_fc_op.cc
@@ -37,8 +37,6 @@ TEST(fc_op, test) {
   desc.SetInput("X", {"mul_x"});
   desc.SetInput("Y", {"mul_y"});
   desc.SetOutput("Out", {"mul_out"});
-  // int num_flatten_dims = 3;
-  // desc.SetAttr("x_num_col_dims", num_flatten_dims);
 
   validator.SetOp(*desc.Proto());
   validator.Execute(10);
diff --git a/paddle/fluid/inference/anakin/engine.cc b/paddle/fluid/inference/anakin/engine.cc
index 39be7865149c70d0514e0a0cc64d0175437469d7..ccf78ad7e56306d24af829c45c888021f4e3fbc4 100644
--- a/paddle/fluid/inference/anakin/engine.cc
+++ b/paddle/fluid/inference/anakin/engine.cc
@@ -90,14 +90,6 @@ void AnakinEngine::Execute(
                    "The anakin input max shape should be greater than"
                    " or equal to the real input shape, Please set the max "
                    "input shape using EnableAnakinEngine");
-    /*
-    if (tensor->numel() > net_shape.count()) {
-      graph_->Reshape(input.first, fluid_input_shape);
-      net_.reset(new AnakinNetT(true));
-      net_->init(*graph_);
-      anakin_input = net_->get_in(input.first);
-    }
-    */
     anakin_input->reshape(fluid_input_shape);
 
     ::anakin::saber::Tensor tmp_anakin_tensor(data, TargetT(), 0,
diff --git a/paddle/fluid/inference/api/details/zero_copy_tensor.cc b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
index d2e2a98a85509be9eb81b1a6e7d36d9d4d583df4..937b6398f8131a6cf4e8b0002e38f4513f0f884f 100644
--- a/paddle/fluid/inference/api/details/zero_copy_tensor.cc
+++ b/paddle/fluid/inference/api/details/zero_copy_tensor.cc
@@ -74,7 +74,7 @@ T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const {
   return res;
 }
 
-PaddleDType ZeroCopyTensor::type() {
+PaddleDType ZeroCopyTensor::type() const {
   EAGER_GET_TENSOR;
   auto type = tensor->type();
   if (type == framework::proto::VarType::FP32) {
diff --git a/paddle/fluid/inference/api/paddle_api.h b/paddle/fluid/inference/api/paddle_api.h
index 52c5cb34b5378cea1788bb8845f42cfe2f6590b4..87f40f09eb9bb552bd246cb39bbbd41abac1c9ac 100644
--- a/paddle/fluid/inference/api/paddle_api.h
+++ b/paddle/fluid/inference/api/paddle_api.h
@@ -177,7 +177,7 @@ class ZeroCopyTensor {
     device_ = device;
   }
 
-  PaddleDType type();
+  PaddleDType type() const;
 
  protected:
   explicit ZeroCopyTensor(void* scope) : scope_{scope} {}
diff --git a/paddle/fluid/operators/anakin/anakin_engine_op.h b/paddle/fluid/operators/anakin/anakin_engine_op.h
index 5da3cc1777625d6b5f91b29c30ba5e192ee4db0f..9d5b4f6f54ccfc9802cef6abac428e28a72ac293 100644
--- a/paddle/fluid/operators/anakin/anakin_engine_op.h
+++ b/paddle/fluid/operators/anakin/anakin_engine_op.h
@@ -120,41 +120,7 @@ class AnakinEngineOp : public framework::OperatorBase {
           inference::Singleton::Global()
               .Get(engine_key_);
     }
-    // BUG here, detect that the tensor data pointer here will change sometime.
-    // Will fix it later.
-    /*
-    // For share with the tensor from fluid, We do the net init in the first net
-    precit.
-    if (!anakin_engine_->IsInit()) {
-      auto temp_max_input_shape = anakin_engine_->GetMaxInputShape();
-      anakin_engine_->AllocTmpMem();
-      for(auto& input : Inputs("Xs")) {
-        if (param_names_.count(input)) continue;
-        platform::CUDAPlace
-        gpu_place(boost::get(dev_place).device);
-        auto *input_var = scope.FindVar(input);
-        auto input_tensor = input_var->GetMutable();
-        auto input_max_shape = temp_max_input_shape[input];
-
-        framework::LoDTensor temp_t;
-        auto t_dims = input_tensor->dims();
-        temp_t.Resize(t_dims);
-        TensorCopySync(*input_tensor, dev_place, &temp_t);
-        input_tensor->Resize(framework::make_ddim(input_max_shape));
-        input_tensor->mutable_data(dev_place);
-        TensorCopySync(temp_t, dev_place, input_tensor);
-
-        auto* input_data = input_tensor->mutable_data(gpu_place);
-        auto* anakin_input = anakin_engine_->Net()->get_in(input);
-
-        ::anakin::saber::Tensor<::anakin::saber::NV>
-        tmp_anakin_tensor(input_data,
-                ::anakin::saber::NV(), 0, input_max_shape);
-        anakin_input->share_from(tmp_anakin_tensor);
-      }
-      anakin_engine_->InitGraph();
-    }
-    */
+
     return anakin_engine_;
   }
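
Aside on the type() change above: because ZeroCopyTensor::type() is now const-qualified, read-only code can query a tensor's dtype through a const reference. Below is a minimal sketch, not part of the patch; it assumes the FLOAT32 and INT64 enumerators of paddle::PaddleDType and the in-tree include path of paddle_api.h, and ElementSize is a hypothetical helper rather than an existing API.

// Sketch only: per-element byte width for a tensor's dtype. Taking the
// tensor by const reference compiles because type() is declared const.
#include <cstddef>
#include <cstdint>

#include "paddle/fluid/inference/api/paddle_api.h"

namespace {

size_t ElementSize(const paddle::ZeroCopyTensor &tensor) {
  switch (tensor.type()) {
    case paddle::PaddleDType::FLOAT32:
      return sizeof(float);
    case paddle::PaddleDType::INT64:
      return sizeof(int64_t);
    default:
      return 0;  // dtype not handled by this sketch
  }
}

}  // namespace

A caller would typically obtain the tensor from a predictor (e.g. the ZeroCopyTensor returned by the predictor's input/output accessors) and pass the dereferenced pointer to such a helper.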