sh run_infer.sh
Created by: VaibhavHiwase
sh run_infer.sh /usr/local/lib/python2.7/dist-packages/requests/__init__.py:83: RequestsDependencyWarning: Old version of cryptography ([1, 2, 3]) may cause slowdown. warnings.warn(warning, RequestsDependencyWarning) ----------- Configuration Arguments ----------- alpha: 2.5 beam_size: 1 beta: 0.3 cutoff_prob: 1.0 cutoff_top_n: 4 decoding_method: ctc_beam_search error_rate_type: wer infer_manifest: data/librispeech/manifest.dev-clean lang_model_path: models/lm/common_crawl_00.prune01111.trie.klm mean_std_path: /home/vaibhav/speech/DeepSpeech-develop/models/librispeech/librispeech_model_fluid/mean_std.npz model_path: checkpoints/libri/step_final num_conv_layers: 2 num_proc_bsearch: 1 num_rnn_layers: 3 num_samples: 1 rnn_layer_size: 2048 share_rnn_weights: 0 specgram_type: linear use_gpu: 0 use_gru: 0 vocab_path: /home/vaibhav/speech/DeepSpeech-develop/models/librispeech/librispeech_model_fluid/vocab.txt
2020-01-28 17:27:56,084-INFO: begin to initialize the external scorer for decoding 2020-01-28 17:29:55,926-INFO: language model: is_character_based = 0, max_order = 5, dict_size = 400000 2020-01-28 17:29:56,215-INFO: end initializing scorer 2020-01-28 17:29:56,215-INFO: start inference ... [libprotobuf ERROR /paddle/build/third_party/protobuf/src/extern_protobuf/src/google/protobuf/message_lite.cc:119] Can't parse message of type "paddle.framework.proto.VarType.TensorDesc" because it is missing required fields: (cannot determine missing fields for lite message) /usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py:779: UserWarning: The following exception is not an EOF exception. "The following exception is not an EOF exception.") Traceback (most recent call last): File "infer.py", line 152, in main() File "infer.py", line 148, in main infer() File "infer.py", line 124, in infer feeding_dict=data_generator.feeding) File "/home/vaibhav/speech/DeepSpeech-develop/model_utils/model.py", line 412, in infer_batch_probs self.init_from_pretrained_model(exe, infer_program) File "/home/vaibhav/speech/DeepSpeech-develop/model_utils/model.py", line 161, in init_from_pretrained_model filename="params.pdparams") File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/io.py", line 798, in load_params filename=filename) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/io.py", line 682, in load_vars filename=filename) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/io.py", line 726, in load_vars executor.run(load_prog) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 780, in run six.reraise(*sys.exc_info()) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 775, in run use_program_cache=use_program_cache) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 822, in _run_impl use_program_cache=use_program_cache) File 
"/usr/local/lib/python2.7/dist-packages/paddle/fluid/executor.py", line 899, in _run_program fetch_var_name) paddle.fluid.core_avx.EnforceNotMet:
C++ Call Stacks (More useful to developers):
0 std::string paddle::platform::GetTraceBackString<char const*>(char const*&&, char const*, int) 1 paddle::platform::EnforceNotMet::EnforceNotMet(std::__exception_ptr::exception_ptr, char const*, int) 2 paddle::framework::TensorFromStream(std::istream&, paddle::framework::Tensor*, paddle::platform::DeviceContext const&) 3 paddle::framework::DeserializeFromStream(std::istream&, paddle::framework::LoDTensor*, paddle::platform::DeviceContext const&) 4 paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, float>::LoadParamsFromBuffer(paddle::framework::ExecutionContext const&, paddle::platform::Place const&, std::istream*, bool, std::vector<std::string, std::allocator<std::string> > const&) const 5 paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, float>::Compute(paddle::framework::ExecutionContext const&) const 6 std::_Function_handler<void (paddle::framework::ExecutionContext const&), paddle::framework::OpKernelRegistrarFunctor<paddle::platform::CPUPlace, false, 0ul, paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, float>, paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, double>, paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, int>, paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, signed char>, paddle::operators::LoadCombineOpKernel<paddle::platform::CPUDeviceContext, long> >::operator()(char const*, char const*, int) const::{lambda(paddle::framework::ExecutionContext const&)#1}>::_M_invoke(std::_Any_data const&, paddle::framework::ExecutionContext const&) 7 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&, paddle::framework::RuntimeContext*) const 8 paddle::framework::OperatorWithKernel::RunImpl(paddle::framework::Scope const&, paddle::platform::Place const&) const 9 paddle::framework::OperatorBase::Run(paddle::framework::Scope const&, 
paddle::platform::Place const&) 10 paddle::framework::Executor::RunPreparedContext(paddle::framework::ExecutorPrepareContext*, paddle::framework::Scope*, bool, bool, bool) 11 paddle::framework::Executor::Run(paddle::framework::ProgramDesc const&, paddle::framework::Scope*, int, bool, bool, std::vector<std::string, std::allocator<std::string> > const&, bool)
Python Call Stacks (More useful to users):
File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/framework.py", line 2488, in append_op attrs=kwargs.get("attrs", None)) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/io.py", line 725, in load_vars attrs={'file_path': os.path.join(load_dirname, filename)}) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/io.py", line 682, in load_vars filename=filename) File "/usr/local/lib/python2.7/dist-packages/paddle/fluid/io.py", line 798, in load_params filename=filename) File "/home/vaibhav/speech/DeepSpeech-develop/model_utils/model.py", line 161, in init_from_pretrained_model filename="params.pdparams") File "/home/vaibhav/speech/DeepSpeech-develop/model_utils/model.py", line 412, in infer_batch_probs self.init_from_pretrained_model(exe, infer_program) File "infer.py", line 124, in infer feeding_dict=data_generator.feeding) File "infer.py", line 148, in main infer() File "infer.py", line 152, in main()
Error Message Summary:
Error: Cannot parse tensor desc at (/paddle/paddle/fluid/framework/tensor_util.cc:466) [operator < load_combine > error] Failed in inference!