// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/ir/ipu/infer_shape_pass.h"

#include "paddle/fluid/framework/ir/graph_helper.h"
#include "paddle/fluid/framework/ir/pass_tester_helper.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/variable_helper.h"
#include "paddle/fluid/platform/device/ipu/ipu_backend.h"
#include "paddle/phi/core/ddim.h"

namespace paddle {
namespace framework {
namespace ir {

// Runs static shape inference over the graph so every var has a concrete
// shape before IPU compilation:
//   1. Pins the dynamic (-1) leading dimension of feed vars to the IPU
//      strategy's micro batch size, and narrows INT64/FP64 feeds to
//      INT32/FP32 (IPU-supported widths).
//   2. If any shape was changed, replays every op's RuntimeInferShape in a
//      temporary CPU scope and writes the inferred shapes back to the
//      graph's VarDescs.
void InferShapePass::ApplyImpl(ir::Graph* graph) const {
  VLOG(10) << "enter InferShapePass::ApplyImpl";
  VLOG(10) << "Raw Graph: ";
  VLOG(10) << DebugString(graph);

  // Make batch_size fixed
  bool need_infer_shape = false;
  auto ipu_backend = platform::ipu::IpuBackend::GetInstance();
  auto micro_batch_size = ipu_backend->GetIpuStrategy()->micro_batch_size;
  auto feed_list = Get<std::vector<std::string>>("feed_list");
  for (auto node : graph->Nodes()) {
    if (!node->IsVar()) {
      continue;
    }
    bool is_feed =
        std::find(feed_list.begin(), feed_list.end(), node->Name()) !=
        feed_list.end();
    if (is_feed) {
      auto input_shape = node->Var()->GetShape();
      // A leading dim <= -1 marks a dynamic batch dimension; pin it so all
      // downstream shapes become static.
      if (input_shape[0] <= -1) {
        input_shape[0] = micro_batch_size;
        node->Var()->SetShape(input_shape);
        need_infer_shape = true;
      }
      // int64->int32
      if (node->Var()->GetDataType() == proto::VarType::INT64) {
        node->Var()->SetDataType(proto::VarType::INT32);
      }
      // float64->float32
      if (node->Var()->GetDataType() == proto::VarType::FP64) {
        node->Var()->SetDataType(proto::VarType::FP32);
      }
    }
  }

  // temp scope for shape inference
  if (need_infer_shape) {
    std::shared_ptr<paddle::framework::Scope> scope(
        new paddle::framework::Scope());
    // Materialize an (empty) variable per VarDesc so RuntimeInferShape has
    // tensors whose dims it can read and write.
    for (auto node : graph->Nodes()) {
      if (!node->IsVar()) {
        continue;
      }
      auto var_desc = node->Var();
      auto* ptr = scope->Var(var_desc->Name());
      paddle::framework::InitializeVariable(ptr, var_desc->GetType());
      auto tensor = ptr->GetMutable<paddle::framework::LoDTensor>();
      tensor->Resize(phi::make_ddim(var_desc->GetShape()));
    }

    // infer shape: visit ops in topological order so inputs are already
    // resolved when each op runs.
    auto nodes = ir::TopologySortOperations(*graph);
    for (auto node : nodes) {
      VLOG(10) << "InferShapePass: Infer shape for Op (" << node->Name() << ")";
      auto op_desc = node->Op();
      // popart_optimizer is IPU-specific and has no CPU kernel/infer-shape.
      if (op_desc->Type() == "popart_optimizer") {
        continue;
      }
      auto op = paddle::framework::OpRegistry::CreateOp(*op_desc);
      paddle::framework::RuntimeContext ctx(
          op->Inputs(), op->Outputs(), *scope);
      op->RuntimeInferShape(*scope, paddle::platform::CPUPlace(), ctx);

      // Copy the inferred dims from the temp-scope tensors back onto the
      // matching output VarDescs in the graph.
      for (auto it = ctx.outputs.begin(); it != ctx.outputs.end(); it++) {
        // use size_t to avoid the signed/unsigned comparison with size()
        for (size_t i = 0; i < it->second.size(); i++) {
          auto output_name = op_desc->Output(it->first)[i];
          auto dim =
              it->second[i]->GetMutable<paddle::framework::LoDTensor>()->dims();
          auto new_shape = phi::vectorize(dim);
          for (auto output_node : node->outputs) {
            if (output_node->Name() == output_name) {
              output_node->Var()->SetShape(new_shape);
              if (VLOG_IS_ON(10)) {
                std::ostringstream sout;
                sout << "InferShapePass: output[" << output_node->Name()
                     << "], infer shape:[";
                for (auto s : new_shape) {
                  sout << std::to_string(s) << ", ";
                }
                sout << "]";
                VLOG(10) << sout.str();
              }
            }
          }
        }
      }
      VLOG(10) << "InferShapePass: Infer shape for Op (" << node->Name()
               << ") finished";
    }
    // release the temp scope
    scope.reset();
  }

  VLOG(10) << "Post Graph: ";
  VLOG(10) << DebugString(graph);
  VLOG(10) << "leave InferShapePass::ApplyImpl";
}

}  // namespace ir
}  // namespace framework
}  // namespace paddle

REGISTER_PASS(infer_shape_pass, paddle::framework::ir::InferShapePass)
    .RequirePassAttr("feed_list");