diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h
index 7289a451e50573a97455f45ae8d5962d54c0e3df..5bd68f9ac2e1b30bc6ce3094960bb89842b99e01 100644
--- a/paddle/fluid/framework/operator.h
+++ b/paddle/fluid/framework/operator.h
@@ -174,14 +174,6 @@ class ExecutionContext {
     return op_.Inputs(name).size();
   }
 
-  const std::string InputVarName(const std::string& name) const {
-    return op_.Input(name);
-  }
-
-  const std::string OutputVarName(const std::string& name) const {
-    return op_.Output(name);
-  }
-
   size_t OutputSize(const std::string& name) const {
     return op_.Outputs(name).size();
   }
diff --git a/paddle/fluid/operators/cudnn_lstm_op.cu.cc b/paddle/fluid/operators/cudnn_lstm_op.cu.cc
index cadd3772afbbef6416d0c167257a3419d2f9362e..811975a9f3d0321aa01941e3c862ae1743269940 100644
--- a/paddle/fluid/operators/cudnn_lstm_op.cu.cc
+++ b/paddle/fluid/operators/cudnn_lstm_op.cu.cc
@@ -292,7 +292,7 @@ class CudnnLSTMGPUKernel : public framework::OpKernel<T> {
       // multi-devices before the first running.
       // use parent scope to make cache persistable
       auto *scope = const_cast<framework::Scope *>(ctx.scope().parent());
-      auto cache_var_name = ctx.InputVarName("Cache");
+      auto cache_var_name = ctx.Inputs("Cache")[0];
      cache_var = scope->Var(cache_var_name);
     }
     CudnnRNNCache *cudnn_rnn_cache = nullptr;
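Note: the removed ExecutionContext::InputVarName / OutputVarName helpers were thin wrappers around op_.Input / op_.Output; call sites can instead take the first element of the name list returned by Inputs(name) / Outputs(name), as the cudnn_lstm kernel now does. Below is a minimal self-contained sketch of that pattern; MockExecutionContext and its stored map are illustrative stand-ins, not Paddle's real classes.

#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Hypothetical stand-in for framework::ExecutionContext, for illustration only.
class MockExecutionContext {
 public:
  explicit MockExecutionContext(
      std::map<std::string, std::vector<std::string>> inputs)
      : inputs_(std::move(inputs)) {}

  // Mirrors the surviving API: returns every variable name bound to a slot.
  const std::vector<std::string>& Inputs(const std::string& name) const {
    return inputs_.at(name);
  }

 private:
  std::map<std::string, std::vector<std::string>> inputs_;
};

int main() {
  // Assume the "Cache" slot is bound to a single variable name.
  MockExecutionContext ctx({{"Cache", {"cudnn_lstm_cache_var"}}});

  // The pattern the patch switches to: what InputVarName("Cache") used to
  // return is the first (and here only) name bound to the "Cache" input.
  auto cache_var_name = ctx.Inputs("Cache")[0];
  assert(cache_var_name == "cudnn_lstm_cache_var");
  return 0;
}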