From e94db381baa469d73806e4921a87daab260f5060 Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Mon, 8 Jan 2018 14:05:45 +0800
Subject: [PATCH] Feature/add shared layout (#7233)

* "reuse ShareLoD with no regret"

* "removed base class shareLayout"

* "fix CI"
---
 paddle/framework/data_transform.cc |  5 ++---
 paddle/framework/op_desc.cc        |  1 +
 paddle/framework/operator.cc       | 19 +++++++++++++++++++
 3 files changed, 22 insertions(+), 3 deletions(-)

diff --git a/paddle/framework/data_transform.cc b/paddle/framework/data_transform.cc
index 55825c5b7d2..fed958db158 100644
--- a/paddle/framework/data_transform.cc
+++ b/paddle/framework/data_transform.cc
@@ -146,9 +146,6 @@ void TransDataLayout(const std::vector<int>& axis,
   auto* dst = out->GetMutable<Tensor>();
   PADDLE_ENFORCE(arity(src.dims()) == 4, "Input Arity Only Suppport 4!");
 
-  auto place = kernel_pair.second.place_;
-  CopyFrom(src, place, *ctx, dst);
-
   auto src_dim = src.dims();
   std::vector<int64_t> dst_dim;
 
@@ -158,6 +155,8 @@ void TransDataLayout(const std::vector<int>& axis,
   }
 
   dst->Resize(make_ddim(dst_dim));
+  auto place = kernel_pair.second.place_;
+  dst->mutable_data(place, src.type());
 
   auto src_type = kernel_pair.first.data_type_;
   framework::VisitDataType(src_type, CastDataLayout(ctx, axis, src, dst));
diff --git a/paddle/framework/op_desc.cc b/paddle/framework/op_desc.cc
index e02e572af2c..47c91290e4b 100644
--- a/paddle/framework/op_desc.cc
+++ b/paddle/framework/op_desc.cc
@@ -66,6 +66,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
                       out);
     out_var->SetLoDLevel(in_var->GetLoDLevel());
   }
+
   bool IsRuntime() const override;
 
  protected:
diff --git a/paddle/framework/operator.cc b/paddle/framework/operator.cc
index adc85b1049f..a1f1be5f342 100644
--- a/paddle/framework/operator.cc
+++ b/paddle/framework/operator.cc
@@ -417,6 +417,25 @@ class RuntimeInferShapeContext : public InferShapeContext {
     auto in_tensor = in_var->Get<LoDTensor>();
     auto* out_tensor = out_var->GetMutable<LoDTensor>();
     out_tensor->set_lod(in_tensor.lod());
+
+    // TODO(dzhwinter) : reuse ShareLoD in most operators.
+    // Need to call ShareLayout explicitly in sequence related ops.
+    // Shall we have a better method to shared info between in/out Tensor?
+    out_tensor->set_layout(in_tensor.layout());
+  }
+
+  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
+                   size_t j = 0) const {
+    PADDLE_ENFORCE_LT(i, Inputs(in).size());
+    PADDLE_ENFORCE_LT(j, Outputs(out).size());
+    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
+    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
+    if (!in_var->IsType<LoDTensor>()) return;
+    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
+                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
+    auto in_tensor = in_var->Get<LoDTensor>();
+    auto* out_tensor = out_var->GetMutable<LoDTensor>();
+    out_tensor->set_layout(in_tensor.layout());
   }
 
   bool IsRuntime() const override { return true; }
-- 
GitLab
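
Usage sketch (not part of the patch): after this change, ShareLoD also
propagates the input layout to the output at runtime, while sequence-related
ops are expected to call RuntimeInferShapeContext::ShareLayout explicitly, per
the TODO above. A minimal sketch of the intended call site follows; the
PassThroughOp class and its "X"/"Out" argument names are hypothetical,
illustrative assumptions, not code from this patch.

    #include "paddle/framework/op_registry.h"  // assumed include path

    namespace paddle {
    namespace operators {

    // Hypothetical pass-through op: the output keeps the input's shape,
    // LoD, and (after this patch) layout.
    class PassThroughOp : public framework::OperatorWithKernel {
     public:
      using framework::OperatorWithKernel::OperatorWithKernel;

      void InferShape(framework::InferShapeContext* ctx) const override {
        PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
        PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
        ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
        // At runtime this now also copies the input layout to the output
        // (out_tensor->set_layout(in_tensor.layout())); at compile time it
        // only shares the LoD level.
        ctx->ShareLoD("X", "Out");
      }
    };

    }  // namespace operators
    }  // namespace paddle

Ops that cannot reuse ShareLoD (e.g. sequence ops whose output LoD differs
from the input's) would instead call ShareLayout on the runtime context to
propagate only the layout.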