/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>

#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/core/meta_tensor.h"

namespace paddle {
namespace framework {

// TODO(chenweihang): Support TensorArray later
class CompatMetaTensor : public phi::MetaTensor {
 public:
  explicit CompatMetaTensor(bool is_runtime)
      : is_runtime_(is_runtime), initialized_(false) {}
  CompatMetaTensor(InferShapeVarPtr var, bool is_runtime)
      : var_(std::move(var)), is_runtime_(is_runtime) {}

  CompatMetaTensor(CompatMetaTensor&&) = default;
  CompatMetaTensor& operator=(CompatMetaTensor&&) = default;
  CompatMetaTensor(const CompatMetaTensor&) = default;
  CompatMetaTensor& operator=(const CompatMetaTensor&) = default;

  int64_t numel() const override;

  DDim dims() const override;

  phi::DataType dtype() const override;

  DataLayout layout() const override;

  void set_dims(const DDim& dims) override;

  void set_dtype(phi::DataType dtype) override;

  void set_layout(DataLayout layout) override;

  void share_lod(const MetaTensor& meta_tensor) override;

  void share_dims(const MetaTensor& meta_tensor) override;

  void share_meta(const MetaTensor& meta_tensor) override;

  bool initialized() const override { return initialized_; }

  bool is_selected_rows() const;
  bool is_tensor_array() const;
  bool is_dense() const;

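  // Safe-bool idiom from phi::MetaTensor: lets callers write
  // `if (meta_tensor)` to test initialization without an implicit
  // conversion to arithmetic types.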
  operator unspecified_bool_type() const override {
    return initialized_ ? unspecified_bool_true : 0;
  }

  bool operator!() const override { return !initialized_; }

 private:
  const LoD& GetRuntimeLoD() const {
    auto* var = PADDLE_GET_CONST(Variable*, var_);
    return var->Get<LoDTensor>().lod();
  }

  int32_t GetCompileTimeLoD() const {
    auto* var = PADDLE_GET_CONST(VarDesc*, var_);
    return var->GetLoDLevel();
  }

  const phi::SelectedRows& GetSelectedRows() const {
    PADDLE_ENFORCE_EQ(is_runtime_,
                      true,
                      platform::errors::Unavailable(
                          "Can only get Tensor from MetaTensor at runtime."));
    auto* var = PADDLE_GET_CONST(Variable*, var_);
    PADDLE_ENFORCE_EQ(var->IsType<phi::SelectedRows>(),
                      true,
                      platform::errors::Unavailable(
                          "The Tensor in MetaTensor is not SelectedRows."));
    return var->Get<phi::SelectedRows>();
  }

  InferShapeVarPtr var_;
  bool is_runtime_;
  bool initialized_{true};
};
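
// Illustrative sketch (assumption: not part of this header's API; `block` and
// the variable name "x" are hypothetical). At compile time a CompatMetaTensor
// wraps a VarDesc*, at runtime a Variable*, both via InferShapeVarPtr:
//
//   VarDesc* desc = block->FindVar("x");
//   CompatMetaTensor meta(InferShapeVarPtr(desc), /*is_runtime=*/false);
//   DDim dims = meta.dims();  // reads the shape recorded in the VarDesc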

// Note: To avoid managing MetaTensor with shared_ptr in InferMetaContext,
// we inherit from InferMetaContext and implement it separately for
// compatibility with fluid; shared_ptr would cause a significant decrease
// in scheduling performance.
class CompatInferMetaContext : public phi::InferMetaContext {
 public:
  CompatInferMetaContext() = default;
  explicit CompatInferMetaContext(phi::MetaConfig config)
      : phi::InferMetaContext(config) {}

  void EmplaceBackInput(CompatMetaTensor input);
  void EmplaceBackOutput(CompatMetaTensor output);

  void EmplaceBackInputs(
      paddle::small_vector<CompatMetaTensor, phi::kInputSmallVectorSize>
          inputs);
  void EmplaceBackOutputs(
      paddle::small_vector<CompatMetaTensor, phi::kOutputSmallVectorSize>
          outputs);

  const phi::MetaTensor& InputAt(size_t idx) const override;

  std::vector<const phi::MetaTensor*> InputsBetween(size_t start,
                                                    size_t end) const override;
  paddle::optional<std::vector<const phi::MetaTensor*>> OptionalInputsBetween(
      size_t start, size_t end) const override;

  phi::MetaTensor* MutableOutputAt(size_t idx) override;
  std::vector<phi::MetaTensor*> MutableOutputBetween(size_t start,
                                                     size_t end) override;

  virtual ~CompatInferMetaContext() = default;

 private:
  paddle::small_vector<CompatMetaTensor, phi::kInputSmallVectorSize>
      compat_inputs_;
  paddle::small_vector<CompatMetaTensor, phi::kOutputSmallVectorSize>
      compat_outputs_;
};
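
// Population sketch (assumption: simplified and illustrative; `var_ptr` is a
// hypothetical InferShapeVarPtr, and BuildInferMetaContext below is the real
// entry point). Tensors are emplaced by value and read back through the
// phi::InferMetaContext interface:
//
//   CompatInferMetaContext ctx;  // default phi::MetaConfig
//   ctx.EmplaceBackInput(CompatMetaTensor(var_ptr, /*is_runtime=*/true));
//   const phi::MetaTensor& in = ctx.InputAt(0);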

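// Builds a CompatInferMetaContext for `op_type` from a fluid
// InferShapeContext, wrapping the op's input/output variables as
// CompatMetaTensors so that a phi InferMeta function can consume them.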
CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx,
                                             const std::string& op_type);

#define DECLARE_INFER_SHAPE_FUNCTOR(op_type, functor_name, fn)      \
  struct functor_name : public paddle::framework::InferShapeBase {  \
    void operator()(                                                \
        paddle::framework::InferShapeContext* ctx) const override { \
      auto infer_meta_context =                                     \
          paddle::framework::BuildInferMetaContext(ctx, #op_type);  \
      fn(&infer_meta_context);                                      \
    }                                                               \
  }
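
// Usage sketch (hypothetical op "my_op"; assumes a phi::MyOpInferMeta
// function wrapped with PD_INFER_META from
// paddle/phi/core/infermeta_utils.h):
//
//   DECLARE_INFER_SHAPE_FUNCTOR(my_op,
//                               MyOpInferShapeFunctor,
//                               PD_INFER_META(phi::MyOpInferMeta));
//
// The resulting functor is then passed to the operator's registration so
// shape inference is delegated to the phi InferMeta function.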

}  // namespace framework
}  // namespace paddle