// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/imperative/execution_context.h"
#include "paddle/fluid/imperative/layer.h"
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/imperative/var_helper.h"

#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/selected_rows.h"

DECLARE_bool(use_mkldnn);

namespace paddle {
namespace imperative {

const framework::Tensor* GetTensorFromVar(const framework::Variable& var);

template <typename VarType>
static void SetForwardDataTypeOfGradVar(const std::shared_ptr<VarType>& var);

template <>
void SetForwardDataTypeOfGradVar<VariableWrapper>(
    const std::shared_ptr<VariableWrapper>& var) {
  if (var->HasGradVar()) {
    auto grad_var = var->GetGradVar();
    VLOG(6) << "Set grad var (" << grad_var->Name() << ")'s forward dtype to ("
            << framework::DataTypeToString(var->DataType()) << ").";
    grad_var->SetForwardDataType(var->DataType());
  }
}

template <>
void SetForwardDataTypeOfGradVar<VarBase>(const std::shared_ptr<VarBase>& var) {
  if (var->HasGradVar()) {
    auto& shared_var = var->SharedVar();
    SetForwardDataTypeOfGradVar<VariableWrapper>(shared_var);
  }
}

template <>
void SetForwardDataTypeOfGradVar<egr::EagerVariable>(
    const std::shared_ptr<egr::EagerVariable>& var) {
  VLOG(10) << "Var in Eager mode does not support "
              "SetForwardDataTypeOfGradVar: "
           << var->name();
  // TODO(jiabin): SetForwardDataType of Grad var is not supported yet in
  // EagerMode.
}

template <typename VarType>
std::shared_ptr<NameVarMap<VarType>> PrepareData(
    const framework::OperatorWithKernel& op, const NameVarMap<VarType>& ins,
    const framework::OpKernelType& expected_kernel_key) {
  std::shared_ptr<NameVarMap<VarType>> tmp_ins_ptr = nullptr;
  for (const auto& name_pair : ins) {
    for (size_t i = 0; i < name_pair.second.size(); ++i) {
      auto& template_var = name_pair.second[i];
      SetForwardDataTypeOfGradVar(template_var);
      const auto* tensor = GetTensorFromVar(template_var->Var());
      if (tensor && tensor->IsInitialized()) {
        auto kernel_type_for_var = op.GetKernelTypeForVar(
            name_pair.first, *tensor, expected_kernel_key);
        if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
          continue;
        } else {
          VLOG(3) << "Transform Variable " << GetNameFromVar(template_var)
                  << " from " << kernel_type_for_var << " to "
                  << expected_kernel_key;

          if (CheckCachedKey(template_var, expected_kernel_key)) {
            VLOG(3) << "Hit variable_wrapper cache: key="
                    << expected_kernel_key;
            std::shared_ptr<VariableWrapper> cache_var =
                GetCachedValue(template_var, expected_kernel_key);
            if (tmp_ins_ptr == nullptr) {
              tmp_ins_ptr = std::make_shared<NameVarMap<VarType>>(ins);
            }

            const auto* tensor = GetTensorFromVar(cache_var->Var());
            auto tmp_var =
                std::make_shared<VarType>(GetNameFromVar(template_var));
            SetType(tmp_var, GetType(template_var));
            SetTensorToVariable(cache_var->Var(), *tensor,
                                tmp_var->MutableVar());
            (*tmp_ins_ptr)[name_pair.first][i] = tmp_var;
          } else {
            framework::Tensor out;
            TransformData(expected_kernel_key, kernel_type_for_var, *tensor,
                          &out);
            if (NeedTransformDataType(kernel_type_for_var,
                                      expected_kernel_key)) {
              // To avoid NameVarMap copy-construction overhead in general
              // scenarios, the original input is returned directly when the
              // transform is done inplace.
              if (tmp_ins_ptr == nullptr) {
                tmp_ins_ptr = std::make_shared<NameVarMap<VarType>>(ins);
              }
              auto tmp_var =
                  std::make_shared<VarType>(GetNameFromVar(template_var));
              SetType(tmp_var, GetType(template_var));
              SetTensorToVariable(template_var->Var(), out,
                                  tmp_var->MutableVar());
              (*tmp_ins_ptr)[name_pair.first][i] = tmp_var;
              SetCachedValue(template_var, expected_kernel_key, tmp_var);
              VLOG(3) << "Set cache to variable_wrapper: key="
                      << expected_kernel_key;
            } else {
              // If the dtype is unchanged, an inplace transform will not
              // change the original value, so transform inplace to avoid an
              // extra copy.
              SetTensorToVariable(template_var->Var(), out,
                                  template_var->MutableVar());
            }
          }
        }
      }
    }
  }
  return tmp_ins_ptr;
}
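
// A minimal usage sketch (hypothetical call site; in the real tracer the
// arguments come from the op being executed and its chosen kernel):
//
//   auto tmp_ins = PrepareData<VarBase>(op, ins, expected_kernel_key);
//   const auto& actual_ins = tmp_ins ? *tmp_ins : ins;
//
// A null return means no input needed transformation (or every transform
// was done inplace), so the original input map can be used unchanged.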

class PreparedOp {
 public:
  PreparedOp(const framework::OperatorBase& op,
             const framework::RuntimeContext& ctx,
             const framework::OpKernelType& kernel_type,
             const framework::OperatorWithKernel::OpKernelFunc& func,
             platform::DeviceContext* dev_ctx);

  PreparedOp(const framework::OperatorBase& op,
             const framework::RuntimeContext& ctx,
             const framework::OpKernelType& kernel_type,
             const framework::KernelSignature& kernel_signature,
             const phi::Kernel& pt_kernel, platform::DeviceContext* dev_ctx);

  static PreparedOp Prepare(const NameVarMap<VarBase>& ins,
                            const NameVarMap<VarBase>& outs,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place,
                            const framework::AttributeMap& attrs,
                            const framework::AttributeMap& default_attrs);

  static PreparedOp Prepare(const NameVarMap<VariableWrapper>& ins,
                            const NameVarMap<VariableWrapper>& outs,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place,
                            const framework::AttributeMap& attrs,
                            const framework::AttributeMap& default_attrs);

  static PreparedOp Prepare(const NameVarMap<egr::EagerVariable>& ins,
                            const NameVarMap<egr::EagerVariable>& outs,
                            const framework::OperatorWithKernel& op,
                            const platform::Place& place,
                            const framework::AttributeMap& attrs,
                            const framework::AttributeMap& default_attrs);

  void Run(const NameVarMap<VarBase>& in, const NameVarMap<VarBase>& out,
           const framework::AttributeMap& attrs,
           const framework::AttributeMap& default_attrs);

  void Run(const NameVarMap<VariableWrapper>& ins,
           const NameVarMap<VariableWrapper>& outs,
           const framework::AttributeMap& attrs,
           const framework::AttributeMap& default_attrs);

  void Run(const NameVarMap<egr::EagerVariable>& ins,
           const NameVarMap<egr::EagerVariable>& outs,
           const framework::AttributeMap& attrs,
           const framework::AttributeMap& default_attrs);

  const framework::OpKernelType& kernel_type() const { return kernel_type_; }

 private:
  const framework::OperatorBase& op_;
  const framework::RuntimeContext& ctx_;
  framework::OpKernelType kernel_type_;
  framework::OperatorWithKernel::OpKernelFunc func_;
  platform::DeviceContext* dev_ctx_;
  // NOTE(chenweihang): Similar op members are used to adapt to the
  // new pten kernels; if there is a better design in the future,
  // we may polish the implementation here.
  bool run_pten_kernel_{false};
  bool run_kp_kernel_{false};
  framework::KernelSignature pt_kernel_signature_;
  phi::Kernel pt_kernel_;
};
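
// Sketch of the expected call sequence (hypothetical; assumes the dygraph
// tracer drives PreparedOp roughly like this):
//
//   auto prepared_op =
//       PreparedOp::Prepare(ins, outs, op, place, attrs, default_attrs);
//   auto tmp_ins = PrepareData<VarBase>(op, ins, prepared_op.kernel_type());
//   prepared_op.Run(tmp_ins ? *tmp_ins : ins, outs, attrs, default_attrs);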

const inline framework::Attribute& GetAttr(
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs, const std::string& name) {
  auto it = attrs.find(name);
  bool found = it != attrs.end();
  if (!found) {
    it = default_attrs.find(name);
    found = it != default_attrs.end();
  }
  PADDLE_ENFORCE_EQ(
      found, true,
      platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
  return it->second;
}
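
// Example: reading an op attribute with fallback to default_attrs
// (a sketch; "axis" is a hypothetical attribute name):
//
//   int axis = BOOST_GET_CONST(int, GetAttr(attrs, default_attrs, "axis"));
//
// Lookup checks attrs first, then default_attrs; a missing name raises a
// NotFound error rather than returning a sentinel value.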

template <typename VarType>
void BuildDygraphPtenKernelContext(
    const framework::KernelSignature& pt_kernel_signature,
    const phi::Kernel& pt_kernel, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    platform::DeviceContext* dev_ctx, phi::KernelContext* kernel_ctx) {
  kernel_ctx->SetDeviceContext(dev_ctx);

  auto& input_names = std::get<0>(pt_kernel_signature.args);
  auto& attr_names = std::get<1>(pt_kernel_signature.args);
  auto& output_names = std::get<2>(pt_kernel_signature.args);

  auto& input_defs = pt_kernel.args_def().input_defs();
  auto& output_defs = pt_kernel.args_def().output_defs();
  auto& attr_defs = pt_kernel.args_def().attribute_defs();

  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));

  PADDLE_ENFORCE_EQ(output_names.size(), output_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of outputs_args names (%d) must be equal to "
                        "the size of kernel output_defs (%d).",
                        output_names.size(), output_defs.size()));

  PADDLE_ENFORCE_EQ(attr_names.size(), attr_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of attribute_args names (%d) must be equal "
                        "to the size of kernel attribute_defs (%d).",
                        attr_names.size(), attr_defs.size()));

  for (size_t i = 0; i < input_names.size(); ++i) {
    auto it = ins.find(input_names[i]);

    size_t start_idx = (i == 0 ? 0 : kernel_ctx->InputRangeAt(i - 1).second);

    if ((it == ins.end()) &&
        (input_defs[i].type_index ==
         std::type_index(typeid(paddle::optional<const phi::DenseTensor&>)))) {
      kernel_ctx->EmplaceBackInputWithoutSetRange(nullptr);
      auto end_idx = start_idx + 1;
      kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
      continue;
    }
    auto ins_vector = it->second;
    size_t end_idx = start_idx + ins_vector.size();

    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      const phi::TensorBase* tensor_in = nullptr;
      auto& var = ins_vector[offset]->Var();
      if (var.template IsType<phi::DenseTensor>()) {
        tensor_in = &(var.template Get<phi::DenseTensor>());
      } else if (var.template IsType<phi::SelectedRows>()) {
        tensor_in = &(var.template Get<phi::SelectedRows>());
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported input `%s` type when call pt kernel.",
            framework::ToTypeName(var.Type())));
      }
      kernel_ctx->EmplaceBackInputWithoutSetRange(tensor_in);
    }
    kernel_ctx->AssignInputRange(std::make_pair(start_idx, end_idx), i);
  }

  for (size_t i = 0; i < output_names.size(); ++i) {
    size_t start_idx = (i == 0 ? 0 : kernel_ctx->OutputRangeAt(i - 1).second);

    auto iter = outs.find(output_names[i]);
    if (iter == outs.end()) {
      kernel_ctx->EmplaceBackOutputWithoutSetRange({nullptr});
      kernel_ctx->AssignOutputRange(std::make_pair(start_idx, start_idx + 1),
                                    i);
      continue;
    }

    auto& outs_vector = iter->second;
    size_t end_idx = start_idx + outs_vector.size();

    for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
      if (outs_vector[offset] == nullptr) {
        kernel_ctx->EmplaceBackOutputWithoutSetRange({nullptr});
        continue;
      }

      phi::TensorBase* tensor_out = nullptr;
      auto* var = outs_vector[offset]->MutableVar();
      if (var->template IsType<phi::DenseTensor>()) {
        tensor_out = var->template GetMutable<phi::DenseTensor>();
      } else if (var->template IsType<phi::SelectedRows>()) {
        tensor_out = var->template GetMutable<phi::SelectedRows>();
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported output `%s` type when call pt kernel.",
            framework::ToTypeName(var->Type())));
      }

      experimental::ResetTensorDtypeAndLayoutByArgDef(tensor_out,
                                                      output_defs.at(i));
      framework::SetAllocationForOutputTenosr(
          tensor_out, phi::TransToPtenPlace(output_defs.at(i).backend));

      kernel_ctx->EmplaceBackOutputWithoutSetRange(tensor_out);
    }
    kernel_ctx->AssignOutputRange(std::make_pair(start_idx, end_idx), i);
  }

  for (size_t i = 0; i < attr_names.size(); ++i) {
    if (attr_defs[i].type_index == std::type_index(typeid(phi::ScalarArray))) {
      if (attrs.find(attr_names[i]) !=
          attrs.end()) {  // shape is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int64_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              phi::ScalarArray(BOOST_GET_CONST(std::vector<int64_t>, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::vector<int32_t>))) {
          kernel_ctx->EmplaceBackAttr(std::move(
              phi::ScalarArray(BOOST_GET_CONST(std::vector<int32_t>, attr))));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(int64_t))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(phi::ScalarArray(&BOOST_GET_CONST(int64_t, attr), 1)));
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(int32_t))) {
          kernel_ctx->EmplaceBackAttr(
              std::move(phi::ScalarArray(&BOOST_GET_CONST(int32_t, attr), 1)));
        } else if (attr_defs[i].type_index ==
                   std::type_index(typeid(std::vector<int32_t>))) {
          const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
          kernel_ctx->EmplaceBackAttr(vector_int_attr);
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to VectorTensor when "
              "construct KernelContext.",
              attr_names[i]));
        }
      } else {  // shape is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        if (ins_vector.size() == 1) {  // ShapeTensor
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVar(ins_vector[0]->Var())));
        } else {  // ShapeTensorList
          std::vector<framework::Variable*> variables;
          variables.reserve(ins_vector.size());
          for (const auto& var_base : ins_vector) {
            variables.push_back(var_base->MutableVar());
          }
          kernel_ctx->EmplaceBackAttr(std::move(
              experimental::MakePtenScalarArrayFromVarList(variables)));
        }
      }
    } else if (attr_defs[i].type_index ==
               std::type_index(typeid(phi::Scalar))) {
      // TODO(chenweihang): support other attrs later
      // TODO(zhangyunfei): Scalar should hold a scalar type, and we should
      // check the attribute type by attr_defs
      if (attrs.find(attr_names[i]) != attrs.end() ||
          default_attrs.find(attr_names[i]) !=
              default_attrs.end()) {  // scalar is in the attribute
        auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
        if (std::type_index(attr.type()) == std::type_index(typeid(float))) {
          kernel_ctx->EmplaceBackAttr(
394
              std::move(phi::Scalar(BOOST_GET_CONST(float, attr))));
395 396 397
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(std::string))) {
          kernel_ctx->EmplaceBackAttr(
398
              std::move(phi::Scalar(BOOST_GET_CONST(std::string, attr))));
399 400 401
        } else if (std::type_index(attr.type()) ==
                   std::type_index(typeid(int))) {
          kernel_ctx->EmplaceBackAttr(
402
              std::move(phi::Scalar(BOOST_GET_CONST(int, attr))));
403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423
        } else {
          PADDLE_THROW(platform::errors::Unimplemented(
              "Unsupported cast op attribute `%s` to Scalar when construct "
              "KernelContext in dygraph.",
              attr_names[i]));
        }
      } else {  // scalar is in the input
        auto& ins_vector = ins.at(attr_names[i]);
        kernel_ctx->EmplaceBackAttr(std::move(
            experimental::MakePtenScalarFromVar(ins_vector[0]->Var())));
      }

    } else {
      // TODO(chenweihang): support other attrs later
      auto& attr = GetAttr(attrs, default_attrs, attr_names[i]);
      if (attr_defs[i].type_index == std::type_index(typeid(int))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(float))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(float, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(bool))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(bool, attr));
      } else if (attr_defs[i].type_index == std::type_index(typeid(int64_t))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int64_t, attr));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::string))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(std::string, attr));
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(phi::DataType))) {
        auto data_type = framework::TransToPtenDataType(
            static_cast<framework::proto::VarType::Type>(
                BOOST_GET_CONST(int, attr)));
        kernel_ctx->EmplaceBackAttr(data_type);
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int64_t>))) {
        if (std::type_index(attr.type()) ==
            std::type_index(typeid(std::vector<int>))) {
          // Emplace back the attr according to the type of the pten kernel
          // args.
          const auto& vector_int_attr = BOOST_GET_CONST(std::vector<int>, attr);
          const std::vector<int64_t> vector_int64_attr(vector_int_attr.begin(),
                                                       vector_int_attr.end());
          kernel_ctx->EmplaceBackAttr(vector_int64_attr);
        }
      } else if (attr_defs[i].type_index ==
                 std::type_index(typeid(std::vector<int>))) {
        kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(std::vector<int>, attr));
      } else {
        PADDLE_THROW(platform::errors::Unimplemented(
            "Unsupported cast op attribute `%s` when construct "
            "KernelContext in dygraph.",
            attr_names[i]));
      }
    }
  }
}
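
// Sketch of how a prepared op is assumed to assemble and launch a pten
// kernel with this builder (member names follow PreparedOp above):
//
//   phi::KernelContext kernel_ctx;
//   BuildDygraphPtenKernelContext<VarBase>(pt_kernel_signature_, pt_kernel_,
//                                          ins, outs, attrs, default_attrs,
//                                          dev_ctx_, &kernel_ctx);
//   pt_kernel_(&kernel_ctx);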

template <typename VarType>
void PreparePtenData(const phi::Kernel& pt_kernel,
                     const framework::KernelSignature& pt_kernel_signature,
                     const NameVarMap<VarType>& ins) {
  auto& input_names = std::get<0>(pt_kernel_signature.args);
  auto& input_defs = pt_kernel.args_def().input_defs();

  PADDLE_ENFORCE_EQ(input_names.size(), input_defs.size(),
                    platform::errors::InvalidArgument(
                        "the size of inputs_args names (%d) must be equal to "
                        "the size of kernel input_defs (%d).",
                        input_names.size(), input_defs.size()));

  for (size_t i = 0; i < input_names.size(); ++i) {
    auto& in_def = input_defs.at(i);
    if (ins.find(input_names[i]) == ins.end()) {
      continue;
    }
    auto& ins_vector = ins.at(input_names[i]);

    for (size_t offset = 0; offset < ins_vector.size(); ++offset) {
      auto var = ins_vector[offset];
      const auto* tensor_in = GetTensorFromVar(var->Var());
      if (tensor_in && tensor_in->IsInitialized()) {
        if (in_def.backend == phi::Backend::ALL_BACKEND) {
          continue;
        }
        auto expected_place = phi::TransToPtenPlace(in_def.backend);
        if (platform::is_same_place(tensor_in->place(), expected_place)) {
          continue;
        }

        VLOG(3) << "Pten Transform Variable " << input_names[i] << " from "
                << tensor_in->place() << " to " << expected_place;

        framework::Tensor tmp_tensor;
        framework::TensorCopySync(*tensor_in, expected_place, &tmp_tensor);

        SetTensorToVariable(var->Var(), tmp_tensor, var->MutableVar());
      }
    }
  }
}
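
// PreparePtenData is expected to run right before kernel launch so that
// every initialized input lives on the kernel's declared backend, e.g.
// (a sketch using the PreparedOp members above):
//
//   PreparePtenData<VarBase>(pt_kernel_, pt_kernel_signature_, ins);
//
// Inputs with backend ALL_BACKEND or an already-matching place are left
// untouched; everything else is copied synchronously via TensorCopySync.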

}  // namespace imperative
}  // namespace paddle