Error raised at the line constructing/minimizing with the Adam optimizer (原文: Adam那一行报错) — see the traceback below ending in `AssertionError` inside `Block._sync_with_cpp`.
Created by: bufuchangfeng
    640     with program_guard(program, startup_program):
    641         params_grads = append_backward(loss, parameter_list,
--> 642                                        act_no_grad_set, callbacks)
    643     # Note: since we can't use all_reduce_op now,
    644     # dgc_op should be the last op of one grad.

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/backward.py in append_backward(loss, parameter_list, no_grad_set, callbacks, checkpoints)
   1265
   1266     program.current_block_idx = current_block_idx
-> 1267     program._sync_with_cpp()
   1268
   1269     if parameter_list is not None:

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in _sync_with_cpp(self)
   4424             self.blocks.append(Block(self, block_idx))
   4425         for block in self.blocks:
-> 4426             block._sync_with_cpp()
   4427
   4428     def _copy_param_info_from(self, other):

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in _sync_with_cpp(self)
   2659             ops_in_python_index += 1
   2660
-> 2661         assert len(self.ops) == len(ops_in_cpp)
   2662         for index in range(len(self.ops)):
   2663             assert self.ops[index].desc == ops_in_cpp[index]

AssertionError: