// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/imperative/execution_context.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/type_defs.h"

#include "paddle/pten/api/include/core.h"

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace framework {
class Tensor;
class Variable;
}  // namespace framework
namespace platform {
class DeviceContext;
}  // namespace platform
}  // namespace paddle

namespace paddle {
namespace imperative {

const framework::Tensor* GetTensorFromVar(const framework::Variable& var);

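// Records the forward dtype on a variable's grad var (if it has one), so the
// backward pass knows which dtype the forward variable held.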
template <typename VarType>
static void SetForwardDataTypeOfGradVar(const std::shared_ptr<VarType>& var);

template <>
void SetForwardDataTypeOfGradVar<VariableWrapper>(
    const std::shared_ptr<VariableWrapper>& var) {
  if (var->HasGradVar()) {
    auto grad_var = var->GetGradVar();
    VLOG(6) << "Set grad var (" << grad_var->Name() << ")'s forward dtype to ("
            << framework::DataTypeToString(var->DataType()) << ").";
    grad_var->SetForwardDataType(var->DataType());
  }
}

template <>
void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
  if (var->HasGradVar()) {
    auto& shared_var = var->SharedVar();
    SetForwardDataTypeOfGradVar<VariableWrapper>(shared_var);
  }
}

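// Returns the VariableWrapper underlying a VarBase (or the VariableWrapper
// itself), so the transform cache below can be accessed uniformly for both
// variable types.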
extern const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var);
extern const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var);

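// Transforms the input variables of `op` (layout / dtype / place) so that
// they match `expected_kernel_key`. Returns nullptr when every input either
// needs no transform or can be transformed in place, so the caller may keep
// using the original `ins`; otherwise returns a copy of `ins` with the
// transformed variables substituted. Transformed results are cached on the
// underlying VariableWrapper, keyed by the expected kernel type.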
template <typename VarType>
std::shared_ptr<NameVarMap<VarType>> PrepareData(
    const framework::OperatorWithKernel& op, const NameVarMap<VarType>& ins,
    const framework::OpKernelType& expected_kernel_key) {
  std::shared_ptr<NameVarMap<VarType>> tmp_ins_ptr = nullptr;
  for (const auto& name_pair : ins) {
    for (size_t i = 0; i < name_pair.second.size(); ++i) {
      auto& var_base = name_pair.second[i];
      SetForwardDataTypeOfGradVar(var_base);
      const auto* tensor = GetTensorFromVar(var_base->Var());
      if (tensor && tensor->IsInitialized()) {
        auto kernel_type_for_var = op.GetKernelTypeForVar(
            name_pair.first, *tensor, expected_kernel_key);
        if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
          continue;
        } else {
          VLOG(3) << "Transform Variable " << var_base->Name() << " from "
                  << kernel_type_for_var << " to " << expected_kernel_key;

          if (GetVariableWrapper(var_base)->hasCacheKey(expected_kernel_key)) {
            VLOG(3) << "Hit variable_wrapper cache: key="
                    << expected_kernel_key;
            std::shared_ptr<VariableWrapper> cache_var =
                GetVariableWrapper(var_base)->getCacheValue(
                    expected_kernel_key);
            if (tmp_ins_ptr == nullptr) {
              tmp_ins_ptr = std::make_shared<NameVarMap<VarType>>(ins);
            }

            const auto* tensor = GetTensorFromVar(cache_var->Var());
            auto tmp_var = std::make_shared<VarType>(var_base->Name());
            tmp_var->SetType(var_base->Type());
            SetTensorToVariable(cache_var->Var(), *tensor,
                                tmp_var->MutableVar());
            (*tmp_ins_ptr)[name_pair.first][i] = tmp_var;
          } else {
            framework::Tensor out;
            TransformData(expected_kernel_key, kernel_type_for_var, *tensor,
                          &out);
            if (NeedTransformDataType(kernel_type_for_var,
                                      expected_kernel_key)) {
              // To avoid NameVarMap copy-construction overhead in the common
              // case, the original input is returned directly (tmp_ins_ptr
              // stays nullptr) when the transform can be done in place.
              if (tmp_ins_ptr == nullptr) {
                tmp_ins_ptr = std::make_shared<NameVarMap<VarType>>(ins);
              }
              auto tmp_var = std::make_shared<VarType>(var_base->Name());
              tmp_var->SetType(var_base->Type());
              SetTensorToVariable(var_base->Var(), out, tmp_var->MutableVar());
              (*tmp_ins_ptr)[name_pair.first][i] = tmp_var;

              GetVariableWrapper(var_base)->setCacheValue(
                  expected_kernel_key, GetVariableWrapper(tmp_var));
              VLOG(3) << "Set cache to variable_wrapper: key="
                      << expected_kernel_key;
            } else {
              // If the dtype is unchanged, an in-place transform does not
              // modify the original value, so transform in place to avoid an
              // extra copy.
              SetTensorToVariable(var_base->Var(), out, var_base->MutableVar());
            }
          }
        }
      }
    }
  }
  return tmp_ins_ptr;
}

class PreparedOp {
 public:
  PreparedOp(const framework::OperatorBase& op,
             const framework::RuntimeContext& ctx,
             const framework::OpKernelType& kernel_type,
             const framework::OperatorWithKernel::OpKernelFunc& func,
             platform::DeviceContext* dev_ctx);

  PreparedOp(const framework::OperatorBase& op,
             const framework::RuntimeContext& ctx,
             const framework::OpKernelType& kernel_type,
             const framework::KernelSignature& kernel_signature,
             const pten::Kernel& pt_kernel, platform::DeviceContext* dev_ctx);

  static PreparedOp Prepare(const NameVarMap<VarBase>& ins,
                            const NameVarMap<VarBase>& outs,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place,
                            const framework::AttributeMap& attrs,
                            const framework::AttributeMap& default_attrs);

  static PreparedOp Prepare(const NameVarMap<VariableWrapper>& ins,
                            const NameVarMap<VariableWrapper>& outs,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place,
                            const framework::AttributeMap& attrs,
                            const framework::AttributeMap& default_attrs);

  void Run(const NameVarMap<VarBase>& in, const NameVarMap<VarBase>& out,
           const framework::AttributeMap& attrs,
           const framework::AttributeMap& default_attrs);

  void Run(const NameVarMap<VariableWrapper>& ins,
           const NameVarMap<VariableWrapper>& outs,
           const framework::AttributeMap& attrs,
           const framework::AttributeMap& default_attrs);

  const framework::OpKernelType& kernel_type() const { return kernel_type_; }

 private:
  const framework::OperatorBase& op_;
  const framework::RuntimeContext& ctx_;
  framework::OpKernelType kernel_type_;
  framework::OperatorWithKernel::OpKernelFunc func_;
  platform::DeviceContext* dev_ctx_;
  // NOTE(chenweihang): The members below mirror the op kernel members above
  // and are used to adapt to the new pten kernel; if there is a better
  // design in the future, we may polish the implementation here.
  bool run_pten_kernel_{false};
  framework::KernelSignature pt_kernel_signature_;
  pten::Kernel pt_kernel_;
};
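
// A rough usage sketch (caller-side code, illustrative only; `ins`, `outs`,
// `op`, `place`, `attrs` and `default_attrs` are assumed to be provided by
// the imperative tracer): Prepare() picks the kernel and device context,
// PrepareData() above transforms the inputs for that kernel type, and Run()
// executes the kernel.
//
//   auto prepared_op =
//       PreparedOp::Prepare(ins, outs, op, place, attrs, default_attrs);
//   auto tmp_ins_ptr =
//       PrepareData<VarBase>(op, ins, prepared_op.kernel_type());
//   if (tmp_ins_ptr == nullptr) {
//     prepared_op.Run(ins, outs, attrs, default_attrs);
//   } else {
//     prepared_op.Run(*tmp_ins_ptr, outs, attrs, default_attrs);
//   }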

}  // namespace imperative
}  // namespace paddle