diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index fc605bebc5481686c2ef28766d39eed9eb4b0c92..349133491f11a5ff6454cebdc9d15b66af238561 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -1565,41 +1565,16 @@ void OperatorWithKernel::CheckWhetherPreparePhiData(
     const VariableNameMap& outnames,
     const Scope& scope) const {
   if (run_phi_kernel_ && impl_ != nullptr) {
-    const auto& phi_kernel_context = impl_->getKernelContext();
-    size_t phi_tensor_index = 0;
-    // Check each tensor in KernelContext, if there is a tensor that has
-    // different type with variable. The PhiKernelContext need be reconstructed.
-    // We use kernel_signature_'s output to retrieve tensor. Because the tensor
-    // in phi_kernel_context stored in the order of kernel_signature_'s output.
-    if (phi_kernel_context->OutputsSize() >= phi_tensor_index ||
-        kernel_signature_ == nullptr) {
+    // For the feed op, there are two situations in which we need to prepare
+    // phi data.
+    // 1. Sometimes the output tensor in the cached PhiKernelContext is
+    // inconsistent with the variable in scope, so we must prepare phi data.
+    // 2. Sometimes the input tensor in the cached PhiKernelContext is broken
+    // for unknown reasons; using it causes a segmentation fault.
+    if (Type() == "feed") {
       need_prepare_phi_data_ = true;
       return;
     }
-
-    const auto& phi_output_names = kernel_signature_->output_names;
-    for (auto& phi_output_name : phi_output_names) {
-      const auto& iter = outnames.find(phi_output_name);
-      if (iter != outnames.end()) {
-        for (auto& var_name : iter->second) {
-          auto var_output = scope.FindVar(var_name);
-          auto phi_output =
-              phi_kernel_context->MutableOutputAt<phi::TensorBase>(
-                  phi_tensor_index);
-          if (phi_output == nullptr) {
-            continue;
-          }
-          if (!(HasSameTensorType<phi::DenseTensor>(phi_output, var_output) ||
-                HasSameTensorType<phi::SparseCooTensor>(phi_output,
-                                                        var_output) ||
-                HasSameTensorType<framework::Strings>(phi_output,
-                                                      var_output))) {
-            need_prepare_phi_data_ = true;
-          }
-          phi_tensor_index++;
-        }
-      }
-    }
   }
 }
 