Commit 45b3766f authored by nhzlx

fix comments

test=develop
Parent: a1d11bb1
...
@@ -37,8 +37,6 @@ TEST(fc_op, test) {
   desc.SetInput("X", {"mul_x"});
   desc.SetInput("Y", {"mul_y"});
   desc.SetOutput("Out", {"mul_out"});
-  // int num_flatten_dims = 3;
-  // desc.SetAttr("x_num_col_dims", num_flatten_dims);
   validator.SetOp(*desc.Proto());
   validator.Execute(10);
...
...
@@ -90,14 +90,6 @@ void AnakinEngine<TargetT, PrecisionType, RunType>::Execute(
                      "The anakin input max shape should be greater than"
                      " or equal to the real input shape, Please set the max "
                      "input shape using EnableAnakinEngine");
-    /*
-    if (tensor->numel() > net_shape.count()) {
-      graph_->Reshape(input.first, fluid_input_shape);
-      net_.reset(new AnakinNetT<TargetT, PrecisionType, RunType>(true));
-      net_->init(*graph_);
-      anakin_input = net_->get_in(input.first);
-    }
-    */
     anakin_input->reshape(fluid_input_shape);
     ::anakin::saber::Tensor<TargetT> tmp_anakin_tensor(data, TargetT(), 0,
...
...
@@ -74,7 +74,7 @@ T *ZeroCopyTensor::data(PaddlePlace *place, int *size) const {
   return res;
 }
 
-PaddleDType ZeroCopyTensor::type() {
+PaddleDType ZeroCopyTensor::type() const {
   EAGER_GET_TENSOR;
   auto type = tensor->type();
   if (type == framework::proto::VarType::FP32) {
...
...
@@ -177,7 +177,7 @@ class ZeroCopyTensor {
     device_ = device;
   }
 
-  PaddleDType type();
+  PaddleDType type() const;
 
  protected:
  explicit ZeroCopyTensor(void* scope) : scope_{scope} {}
...
...
@@ -120,41 +120,7 @@ class AnakinEngineOp : public framework::OperatorBase {
           inference::Singleton<inference::anakin::AnakinEngineManager>::Global()
               .Get(engine_key_);
     }
-    // BUG here, detect that the tensor data pointer here will change sometime.
-    // Will fix it later.
-    /*
-    // For share with the tensor from fluid, We do the net init in the first net
-    precit.
-    if (!anakin_engine_->IsInit()) {
-      auto temp_max_input_shape = anakin_engine_->GetMaxInputShape();
-      anakin_engine_->AllocTmpMem();
-      for(auto& input : Inputs("Xs")) {
-        if (param_names_.count(input)) continue;
-        platform::CUDAPlace
-        gpu_place(boost::get<platform::CUDAPlace>(dev_place).device);
-        auto *input_var = scope.FindVar(input);
-        auto input_tensor = input_var->GetMutable<framework::LoDTensor>();
-        auto input_max_shape = temp_max_input_shape[input];
-        framework::LoDTensor temp_t;
-        auto t_dims = input_tensor->dims();
-        temp_t.Resize(t_dims);
-        TensorCopySync(*input_tensor, dev_place, &temp_t);
-        input_tensor->Resize(framework::make_ddim(input_max_shape));
-        input_tensor->mutable_data<float>(dev_place);
-        TensorCopySync(temp_t, dev_place, input_tensor);
-        auto* input_data = input_tensor->mutable_data<float>(gpu_place);
-        auto* anakin_input = anakin_engine_->Net()->get_in(input);
-        ::anakin::saber::Tensor<::anakin::saber::NV>
-        tmp_anakin_tensor(input_data,
-            ::anakin::saber::NV(), 0, input_max_shape);
-        anakin_input->share_from(tmp_anakin_tensor);
-      }
-      anakin_engine_->InitGraph();
-    }
-    */
     return anakin_engine_;
   }
...