data_transfer.h
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/framework/new_executor/new_executor_defs.h"
#include "paddle/fluid/framework/op_kernel_type.h"

namespace paddle {
namespace framework {
namespace interpreter {

/*
 * A helper class that implements the data transform operation.
 * It applies layout, dtype, and device transfers in turn.
 */
class DataTranferHelper {
 public:
  DataTranferHelper(const platform::Place& place, VariableScope* var_scope)
      : place_(place), var_scope_(var_scope) {}

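  // Applies the needed layout/dtype/device transfers for `var_name` (judged
  // by comparing `kernel_type_for_var` with `expected_kernel_key`), records
  // the constructed transfer ops in `new_op_func_nodes`, and writes the name
  // of the transferred variable to `new_var_name`. Returns true if any
  // transfer was applied.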
  bool apply(const OpKernelType& kernel_type_for_var,
             const OpKernelType& expected_kernel_key,
             const std::string& var_name, std::string* new_var_name,
             std::vector<OpFuncNode>* new_op_func_nodes, bool use_local_scope,
             bool is_fetch_v2);

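  // Builds and runs an op that shares `src_var_name` into `dst_var_name`
  // (sharing the underlying buffer rather than copying it), and appends the
  // resulting OpFuncNode to `op_func_nodes`.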
  void RunAndConstructShareNode(const std::string& src_var_name,
                                const std::string& dst_var_name,
                                std::vector<OpFuncNode>* op_func_nodes);

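  // Runs the given transfer op on `var_name` -> `new_var_name` and records it
  // as an OpFuncNode in `op_func_nodes`, so the interpreter can replay the
  // transfer on later runs.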
  void RunAndConstructOpFuncNode(const std::shared_ptr<OperatorBase>& op,
                                 const std::string& var_name,
                                 const std::string& new_var_name,
                                 std::vector<OpFuncNode>* op_func_nodes);

 private:
  platform::Place place_;
  VariableScope* var_scope_;
};
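
// A minimal usage sketch (illustrative only and not part of this header; the
// `place`, `var_scope`, and kernel-type values are assumed to come from the
// interpreter's op-preparation step):
//
//   DataTranferHelper helper(place, &var_scope);
//   std::string new_var_name;
//   std::vector<OpFuncNode> transfer_nodes;
//   if (helper.apply(kernel_type_for_var, expected_kernel_key, "x",
//                    &new_var_name, &transfer_nodes,
//                    /*use_local_scope=*/true, /*is_fetch_v2=*/false)) {
//     // Downstream code should now read "x" through `new_var_name`.
//   }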

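// Applies layout/dtype/device transfers to the inputs of an op: for each
// input variable whose kernel type differs from `expected_kernel_key`, a
// transfer op is inserted, `ins_map_temp`/`outs_map_temp` and `var_scope` are
// updated to point at the transferred variable, and the constructed transfer
// ops are appended to `op_func_nodes`.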
void ApplyDataTransform(const OpKernelType& expected_kernel_key,
                        const platform::Place& place,
                        VariableValueMap* ins_map_temp,
                        VariableValueMap* outs_map_temp,
                        VariableScope* var_scope, OpFuncNode* op_func_node,
                        std::vector<OpFuncNode>* op_func_nodes,
                        bool use_local_scope = true);

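// Casts complex gradient variables back to a real dtype when the forward
// variable they correspond to holds real data, mirroring the equivalent
// handling in the static-graph executor.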
void HandleComplexGradToRealGrad(const OpFuncNode& op_func_node,
                                 const platform::Place& place,
                                 const VariableNameMap& out_names,
                                 VariableValueMap* out_vars,
                                 VariableScope* var_scope,
                                 std::vector<OpFuncNode>* op_func_nodes,
                                 framework::Scope* local_scope);

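// Selects the memcpy op type (e.g. "memcpy_h2d" or "memcpy_d2h") appropriate
// for moving data from `src_place` to `dst_place`.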
std::string get_memcpy_type(const platform::Place& src_place,
                            const platform::Place& dst_place);

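// A device transfer is needed whenever the source and destination places
// differ, with one exception: CUDA-pinned host memory is directly accessible
// from the CPU, so a pinned -> CPU pair needs no copy.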
inline bool need_device_transform(const OpKernelType& kernel_type_for_var,
                                  const OpKernelType& expected_kernel_key) {
  auto& src_place = kernel_type_for_var.place_;
  auto& dst_place = expected_kernel_key.place_;
  if (platform::is_same_place(src_place, dst_place) ||
      (platform::is_cuda_pinned_place(src_place) &&
       platform::is_cpu_place(dst_place))) {
    return false;
  }
  return true;
}

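// Thin wrapper over framework::NeedTransformDataType: true when the
// variable's dtype differs from the dtype the kernel expects.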
inline bool need_dtype_transform(const OpKernelType& kernel_type_for_var,
                                 const OpKernelType& expected_kernel_key) {
  return framework::NeedTransformDataType(kernel_type_for_var,
                                          expected_kernel_key);
}

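// Thin wrapper over framework::NeedTransformLayout: true when the variable's
// data layout differs from the layout the kernel expects.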
inline bool need_layout_transform(const OpKernelType& kernel_type_for_var,
                                  const OpKernelType& expected_kernel_key) {
  return framework::NeedTransformLayout(kernel_type_for_var.data_layout_,
                                        expected_kernel_key.data_layout_);
}

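// Builds a layout-transfer op (e.g. "transfer_layout") that converts
// `var_name` from `in_layout` to `out_layout`, creating the destination
// variable `*new_var_name` in the scope.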
std::shared_ptr<OperatorBase> TransferLayout(
    const std::string& var_name, std::string* new_var_name,
    DataLayout in_layout, DataLayout out_layout, VariableScope* var_scope,
    framework::Scope* local_scope, bool is_fetch_v2);

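// Builds a dtype-transfer op (e.g. "transfer_dtype") that casts `var_name`
// from `in_dtype` to `out_dtype`.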
std::shared_ptr<OperatorBase> TransferDtype(const std::string& var_name,
                                            std::string* new_var_name,
                                            proto::VarType::Type in_dtype,
                                            proto::VarType::Type out_dtype,
                                            VariableScope* var_scope,
                                            framework::Scope* local_scope);

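// Builds a memcpy op (selected via get_memcpy_type) that copies `var_name`
// from `src_place` to `dst_place`.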
std::shared_ptr<OperatorBase> TransferDevice(const std::string& var_name,
                                             std::string* new_var_name,
                                             const platform::Place& src_place,
                                             const platform::Place& dst_place,
                                             VariableScope* var_scope,
                                             framework::Scope* local_scope);

}  // namespace interpreter
}  // namespace framework
}  // namespace paddle