diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index 72240785102a948940aeef56a949a78b6d5359c7..c6da5403dc5fcbc33a23f02c5f7504fa61d1dc66 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -1565,16 +1565,41 @@ void OperatorWithKernel::CheckWhetherPreparePhiData(
     const VariableNameMap& outnames, const Scope& scope) const {
   if (run_phi_kernel_ && impl_ != nullptr) {
-    // For feed, there are two situations we need prepare phi data.
-    // 1. Sometimes the output's tensor in cached PhiKernelContext is
-    // inconsistent with the variable in scope. So we need prepare phi data.
-    // 2. Somehow, sometimes the input's tensor in cached PhiKernelContext has
-    // some problems. When we use these inputs, we get segmentfault and we must
-    // prepare phi data.
-    if (Type() == "feed") {
+    const auto& phi_kernel_context = impl_->getKernelContext();
+    size_t phi_tensor_index = 0;
+    // Check each tensor in the KernelContext. If any tensor's type differs
+    // from that of its corresponding variable, the PhiKernelContext must be
+    // reconstructed. We use kernel_signature_'s output names to retrieve the
+    // tensors, as phi_kernel_context stores them in that same output order.
+    if (phi_kernel_context->OutputsSize() >= phi_tensor_index ||
+        kernel_signature_ == nullptr) {
       need_prepare_phi_data_ = true;
       return;
     }
+
+    const auto& phi_output_names = kernel_signature_->output_names;
+    for (auto& phi_output_name : phi_output_names) {
+      const auto& iter = outnames.find(phi_output_name);
+      if (iter != outnames.end()) {
+        for (auto& var_name : iter->second) {
+          auto var_output = scope.FindVar(var_name);
+          auto phi_output =
+              phi_kernel_context->MutableOutputAt<phi::TensorBase>(
+                  phi_tensor_index);
+          if (phi_output == nullptr) {
+            continue;
+          }
+          if (!(HasSameTensorType<phi::DenseTensor>(phi_output, var_output) ||
+                HasSameTensorType<phi::SparseCooTensor>(phi_output,
+                                                        var_output) ||
+                HasSameTensorType<framework::Strings>(phi_output,
+                                                      var_output))) {
+            need_prepare_phi_data_ = true;
+          }
+          phi_tensor_index++;
+        }
+      }
+    }
   }
 }
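
The hunk above relies on a `HasSameTensorType` helper that is defined elsewhere in `operator.cc` and is not part of this diff. As a rough sketch of the kind of check it performs (assuming it compares the concrete type of the cached `phi::TensorBase*` against the type held by the scope `Variable`; the signature and body below are assumptions for illustration, not taken from this patch):

```cpp
// Sketch only: illustrates the type comparison the hunk depends on.
// Assumes phi tensor types expose an LLVM-style classof() and that
// framework::Variable exposes IsType<T>(); the real helper may differ.
template <typename TensorType>
bool HasSameTensorType(phi::TensorBase* phi_tensor, Variable* var) {
  if (phi_tensor == nullptr && var == nullptr) {
    // Nothing cached and nothing in scope: treat as matching.
    return true;
  }
  if (phi_tensor != nullptr && var != nullptr) {
    // Match only when the cached phi tensor and the scope variable
    // hold the same concrete tensor type.
    return TensorType::classof(phi_tensor) && var->IsType<TensorType>();
  }
  // One side missing: the cached PhiKernelContext entry is stale.
  return false;
}
```

With such a helper, a type change on any output across runs (for example, the kind of output-type mismatch the removed comment describes for feed) marks the cached PhiKernelContext as stale and sets `need_prepare_phi_data_`, instead of special-casing the feed op by name.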