diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py
index 3574c1c0e91bf264abb853ae0ad346011a0b520b..1cb4314fb22ae935a00ceea25128e7f2b5430c84 100644
--- a/benchmark/fluid/mnist.py
+++ b/benchmark/fluid/mnist.py
@@ -179,9 +179,13 @@ def run_benchmark(model, args):
             outs = train_exe.run(
                 feed={"pixel": img_data,
                       "label": y_data},
-                fetch_list=[avg_cost.name, batch_acc.name, batch_size_tensor.name]
+                fetch_list=[
+                    avg_cost.name, batch_acc.name, batch_size_tensor.name
+                ]
             )  # The accuracy is the accumulation of batches, but not the current batch.
-            accuracy.update(value=np.array(np.mean(outs[1])), weight=np.mean(np.array(outs[2])))
+            accuracy.update(
+                value=np.array(np.mean(outs[1])),
+                weight=np.mean(np.array(outs[2])))
             iters += 1
             num_samples += len(y_data)
             loss = np.mean(np.array(outs[0]))
diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py
index 5f60f806f7f1d402a9b39e253037c1b8d9fb15dc..0fd7258a804e7c93b0b03da140140394bf90004a 100644
--- a/benchmark/fluid/resnet.py
+++ b/benchmark/fluid/resnet.py
@@ -268,7 +268,9 @@ def run_benchmark(model, args):
             loss, acc, weight = train_exe.run(
                 feed={'data': image,
                       'label': label},
-                fetch_list=[avg_cost.name, batch_acc.name, batch_size_tensor.name])
+                fetch_list=[
+                    avg_cost.name, batch_acc.name, batch_size_tensor.name
+                ])
             iters += 1
             num_samples += len(label)
             accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight))
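As the inline comment notes, the reported accuracy is the accumulation over all batches processed so far, not the accuracy of the current batch: each `accuracy.update(value=..., weight=...)` / `accuracy.add(...)` call folds the batch's mean accuracy into a batch-size-weighted running mean. Below is a minimal sketch of that accumulation behavior; the `RunningAccuracy` class and its `eval()` method are illustrative only, not the fluid metrics API.

```python
import numpy as np

# Hypothetical sketch (not the fluid API): a weighted running mean, which is
# what accumulating per-batch accuracy with a batch-size weight amounts to.
class RunningAccuracy(object):
    def __init__(self):
        self.total = 0.0   # sum of value * weight over all batches seen so far
        self.weight = 0.0  # sum of weights (batch sizes) seen so far

    def update(self, value, weight):
        # value: mean accuracy of the current batch; weight: its batch size
        self.total += float(value) * float(weight)
        self.weight += float(weight)

    def eval(self):
        # Accumulated accuracy over all batches, not just the current one
        return self.total / self.weight if self.weight > 0 else 0.0

# Usage with per-batch outputs like those fetched above:
acc = RunningAccuracy()
acc.update(value=np.array(0.90), weight=32)  # batch 1: 90% correct on 32 samples
acc.update(value=np.array(0.50), weight=32)  # batch 2: 50% correct on 32 samples
print(acc.eval())                            # 0.70, the accumulated accuracy
```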