// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/operators/reshape_op.h"
#include "lite/kernels/npu/bridges/graph.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/utility.h"

namespace paddle {
namespace lite {
namespace subgraph {
namespace npu {

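// Converts the reshape/reshape2 op into a HiAI ge::op::Reshape node. The
// target shape is taken from the "Shape" input tensor when present, and from
// the "shape" attribute otherwise; "ShapeTensor" (a tensor list) is not
// supported.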
int ReshapeConverter(void* ctx, OpLite* op, KernelBase* kernel) {
  CHECK(ctx != nullptr);
  CHECK(op != nullptr);
  auto graph = static_cast<Graph*>(ctx);
  auto op_info = op->op_info();
  auto op_type = op_info->Type();
  auto scope = op->scope();
  VLOG(3) << "[NPU] Converting " + op_type + "...";

  // Get input and output vars and op attributes
  auto x_name = op_info->Input("X").front();
  auto x_type = kernel->GetInputDeclType("X");
  CHECK(x_type->precision() == PRECISION(kFloat));
  CHECK(x_type->layout() == DATALAYOUT(kNCHW));
  auto x = scope->FindMutableTensor(x_name);
  auto x_dims = x->dims();
  auto out_name = op_info->Output("Out").front();
  auto out_type = kernel->GetOutputDeclType("Out");
  CHECK(out_type->precision() == PRECISION(kFloat));
  CHECK(out_type->layout() == DATALAYOUT(kNCHW));

  // X node
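  // Reuse the graph node for X if it was already converted; otherwise add a
  // new data node with X's dims.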
  std::shared_ptr<ge::Operator> x_node = nullptr;
  if (graph->HasNode(x_name)) {
    x_node = graph->GetNode(x_name);
  } else {
    x_node = graph->AddNode(x_name, x_dims);
  }

  // Reshape node
  auto reshape_node = graph->AddNode<ge::op::Reshape>(out_name);
  reshape_node->set_input_tensor(*x_node);

  // Read the target shape from the "ShapeTensor" input, the "Shape" input, or
  // the "shape" attribute, in that order of priority
  if (HasInputArg(op_info, scope, "ShapeTensor")) {
    LOG(WARNING) << "[NPU] not support \"Shape\" from more than one Tensor.";
    return FAILED;
  } else if (HasInputArg(op_info, scope, "Shape")) {
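    // The target shape comes from the "Shape" input tensor. If that tensor
    // has not been converted into a graph node yet, read its values at
    // bridge time, validate them against X's dims, and add a node holding
    // the resolved shape.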
    auto actual_shape_name = op_info->Input("Shape").front();
    // auto actual_shape_type = kernel->GetInputDeclType("Shape");
    // CHECK(actual_shape_type->precision() == PRECISION(kInt32));
    // CHECK(actual_shape_type->layout() == DATALAYOUT(kNCHW));
    std::shared_ptr<ge::Operator> actual_shape_node = nullptr;
    if (graph->HasNode(actual_shape_name)) {
      actual_shape_node = graph->GetNode(actual_shape_name);
    } else {
      auto actual_shape = scope->FindMutableTensor(actual_shape_name);
      auto actual_shape_dims = actual_shape->dims();
      auto actual_shape_data = actual_shape->mutable_data<int>();
      auto shape =
          std::vector<int>(actual_shape_data,
                           actual_shape_data + actual_shape_dims.production());
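      // ValidateShape resolves -1 (inferred) and 0 (copied from the input)
      // entries in the target shape against the input dims.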
      auto out_dims = lite::operators::ValidateShape(shape, x_dims);
      auto out_shape = out_dims.Vectorize();
      if (out_shape.size() > 4) {
        LOG(WARNING) << "[NPU] HiAI DDK only supports up to 4 dimensions, "
                        "but the target shape has "
                     << out_shape.size();
      }
      auto actual_shape_const_node =
          graph->AddNode(actual_shape_name,
                         std::vector<int>(out_shape.begin(), out_shape.end()));
      actual_shape_node = actual_shape_const_node;
    }
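    // Feed the resolved shape to the Reshape node through its second
    // input "w".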
    reshape_node->set_input_w(*actual_shape_node);
  } else {
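    // No shape tensor input, so fall back to the static "shape" attribute.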
    auto shape = op_info->GetAttr<std::vector<int>>("shape");
    auto out_dims = lite::operators::ValidateShape(shape, x_dims);
    auto out_shape = out_dims.Vectorize();
    if (out_shape.size() > 4) {
      LOG(WARNING) << "[NPU] HiAI DDK only supports up to 4 dimensions, "
                      "but the target shape has "
                   << out_shape.size();
    }
    reshape_node->set_attr_shape(
        ge::AttrValue::LIST_INT(out_shape.begin(), out_shape.end()));
  }

  // XShape node
  if (op_type == "reshape2") {
    // Append an extra reshape node to calc XShape
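    // XShape holds the same data as X, with a leading 1 prepended to its
    // shape.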
    std::vector<int64_t> xshape_dims(x_dims.size() + 1, 1);
    for (size_t i = 0; i < x_dims.size(); i++) {
      xshape_dims[i + 1] = x_dims[i];
    }
    if (xshape_dims.size() > 4) {
      LOG(WARNING) << "[NPU] HiAI DDK only supports up to 4 dimensions, "
                      "but XShape has "
                   << xshape_dims.size();
      return FAILED;
    }
    auto xshape_name = op_info->Output("XShape").front();
    // auto xshape_type = kernel->GetOutputDeclType("XShape");
    // CHECK(xshape_type->precision() == PRECISION(kFloat));
    // CHECK(xshape_type->layout() == DATALAYOUT(kNCHW));
    auto xshape_node = graph->AddNode<ge::op::Reshape>(xshape_name);
    xshape_node->set_input_tensor(*x_node);
    xshape_node->set_attr_shape(
        ge::AttrValue::LIST_INT(xshape_dims.begin(), xshape_dims.end()));
  }
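  // The converted shape is fixed at bridge time, so the graph must be
  // rebuilt whenever the input shape changes.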
  return REBUILD_WHEN_SHAPE_CHANGED;
}

}  // namespace npu
}  // namespace subgraph
}  // namespace lite
}  // namespace paddle

REGISTER_SUBGRAPH_BRIDGE(NPU,
                         reshape,
                         paddle::lite::subgraph::npu::ReshapeConverter);
REGISTER_SUBGRAPH_BRIDGE(NPU,
                         reshape2,
                         paddle::lite::subgraph::npu::ReshapeConverter);