Commit f5811ba4 authored by Yibing Liu

Tiny fixes in profiling and training scripts

Parent cff2f954
@@ -2,6 +2,8 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 import numpy as np
+
+
 def to_lodtensor(data, place):
     """convert tensor to lodtensor
......
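The hunk only shows the head of `to_lodtensor`. For context, here is a minimal sketch of how such a helper is commonly completed in Fluid of this era (the exact body in this repository may differ): it flattens a batch of variable-length sequences and records the cumulative lengths as LoD (level-of-detail) information.

```python
import numpy as np
import paddle.fluid as fluid


def to_lodtensor(data, place):
    """Convert a batch of variable-length sequences to a fluid.LoDTensor.

    A sketch of the common Fluid idiom, not necessarily this repo's body.
    """
    # cumulative offsets, e.g. lengths [2, 3] -> lod [0, 2, 5]
    lod = [0]
    for seq in data:
        lod.append(lod[-1] + len(seq))
    flattened = np.concatenate(data, axis=0)
    res = fluid.LoDTensor()
    res.set(flattened, place)   # copy the data onto the given place (CPU/GPU)
    res.set_lod([lod])          # attach the sequence boundary information
    return res
```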
@@ -84,8 +84,6 @@ def _net_conf(feature, label, hidden_dim, proj_dim, stacked_num, class_num,
         size=class_num,
         act='softmax')
-
     if not is_train: return feature, prediction
-
     cost = fluid.layers.cross_entropy(input=prediction, label=label)
     avg_cost = fluid.layers.mean(x=cost)
     acc = fluid.layers.accuracy(input=prediction, label=label)
......
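The early return kept by this hunk means one network configuration serves both modes: when `is_train` is false the function stops at `(feature, prediction)`, and only in training mode are loss and accuracy layers appended. A minimal sketch of that tail, assuming `prediction` is the softmax fc output and `label` an int64 data layer; the helper name `_net_conf_tail` is illustrative, not from the repo:

```python
import paddle.fluid as fluid


def _net_conf_tail(feature, prediction, label, is_train):
    # inference only needs the forward outputs
    if not is_train:
        return feature, prediction
    # training additionally builds the loss and accuracy on top of prediction
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(x=cost)
    acc = fluid.layers.accuracy(input=prediction, label=label)
    return prediction, avg_cost, acc
```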
@@ -71,13 +71,14 @@ def parse_args():
     parser.add_argument(
         '--max_batch_num',
         type=int,
-        default=11,
+        default=10,
         help='Maximum number of batches for profiling. (default: %(default)d)')
     parser.add_argument(
-        '--num_batch_to_skip',
+        '--first_batches_to_skip',
         type=int,
         default=1,
-        help='Number of batches to skip for profiling. (default: %(default)d)')
+        help='Number of first batches to skip for profiling. '
+        '(default: %(default)d)')
     parser.add_argument(
         '--print_train_acc',
         action='store_true',
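A standalone sketch of the renamed option, showing the `%(default)d` mechanism used in these help strings: argparse substitutes the declared default into the rendered help text automatically.

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument(
    '--first_batches_to_skip',
    type=int,
    default=1,
    help='Number of first batches to skip for profiling. '
    '(default: %(default)d)')

args = parser.parse_args(['--first_batches_to_skip', '2'])
print(args.first_batches_to_skip)  # -> 2
# parser.print_help() would render the option as:
#   ... Number of first batches to skip for profiling. (default: 1)
```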
@@ -103,14 +104,15 @@ def print_arguments(args):
 
 
 def profile(args):
     """profile the training process"""
-    if not args.num_batch_to_skip < args.max_batch_num:
-        raise ValueError("arg 'num_batch_to_skip' must be smaller than "
+    if not args.first_batches_to_skip < args.max_batch_num:
+        raise ValueError("arg 'first_batches_to_skip' must be smaller than "
                          "'max_batch_num'.")
-    if not args.num_batch_to_skip >= 0:
-        raise ValueError("arg 'num_batch_to_skip' must not be smaller than 0.")
+    if not args.first_batches_to_skip >= 0:
+        raise ValueError(
+            "arg 'first_batches_to_skip' must not be smaller than 0.")
 
-    prediction, avg_cost, accuracy = stacked_lstmp_model(
-        args.hidden_dim, args.proj_dim, args.stacked_num, args.parallel)
+    _, avg_cost, accuracy = stacked_lstmp_model(args.hidden_dim, args.proj_dim,
+                                                args.stacked_num, args.parallel)
     adam_optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)
     adam_optimizer.minimize(avg_cost)
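A small readability note on the guards above: `if not a < b` double-negates, and the same checks can be written with direct comparisons. A sketch (assuming the `args` namespace parsed earlier), not the committed code:

```python
# equivalent guards written without 'if not'
if args.first_batches_to_skip >= args.max_batch_num:
    raise ValueError("arg 'first_batches_to_skip' must be smaller than "
                     "'max_batch_num'.")
if args.first_batches_to_skip < 0:
    raise ValueError(
        "arg 'first_batches_to_skip' must not be smaller than 0.")
```

The switch to `_, avg_cost, accuracy = ...` also signals intent: the prediction output is unused during profiling, so only the loss (for the optimizer) and accuracy are kept.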
@@ -135,7 +137,7 @@ def profile(args):
     with profiler.profiler(args.device, sorted_key) as prof:
         frames_seen, start_time = 0, 0.0
         for batch_id in range(0, args.max_batch_num):
-            if args.num_batch_to_skip == batch_id:
+            if args.first_batches_to_skip == batch_id:
                 profiler.reset_profiler()
                 start_time = time.time()
                 frames_seen = 0
......
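`profiler.reset_profiler()` discards everything recorded so far, so triggering it at batch `first_batches_to_skip` excludes warm-up batches (memory allocation, kernel setup) from both the profiling report and the throughput timer. A condensed sketch of the pattern; the hard-coded `'GPU'` state and `'total'` sort key stand in for `args.device` and `sorted_key`:

```python
import time

import paddle.fluid.profiler as profiler

max_batch_num, first_batches_to_skip = 10, 1

# 'total' sorts the final report by total time spent per operator
with profiler.profiler('GPU', 'total') as prof:
    frames_seen, start_time = 0, 0.0
    for batch_id in range(0, max_batch_num):
        if batch_id == first_batches_to_skip:
            # drop warm-up batches from the profile and the throughput count
            profiler.reset_profiler()
            start_time = time.time()
            frames_seen = 0
        # ... run one training batch here, accumulating frames_seen ...
```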
@@ -92,8 +92,8 @@ def print_arguments(args):
 
 
 def train(args):
     """train in loop."""
-    prediction, avg_cost, accuracy = stacked_lstmp_model(
-        args.hidden_dim, args.proj_dim, args.stacked_num, args.parallel)
+    _, avg_cost, accuracy = stacked_lstmp_model(args.hidden_dim, args.proj_dim,
+                                                args.stacked_num, args.parallel)
     adam_optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate)
     adam_optimizer.minimize(avg_cost)
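For context, once the optimizer is attached, a Fluid training step of this era typically runs the default program through an `Executor`, feeding LoDTensors and fetching `avg_cost` and `accuracy`. A minimal sketch; the reader name `train_reader` and the feed keys `"feature"`/`"label"` are assumptions, not this repo's exact names:

```python
import paddle.fluid as fluid

place = fluid.CPUPlace()  # fluid.CUDAPlace(0) for a GPU run
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())  # initialize parameters once

for data in train_reader():  # hypothetical batch reader
    cost, acc = exe.run(
        fluid.default_main_program(),
        # feed keys are illustrative; they must match the data layer names
        feed={"feature": to_lodtensor(data[0], place),
              "label": to_lodtensor(data[1], place)},
        fetch_list=[avg_cost, accuracy])
```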
@@ -144,7 +144,7 @@ def train(args):
 
             pass_end_time = time.time()
             time_consumed = pass_end_time - pass_start_time
             # need to add test logic (kuke)
-            print("\nPass %d, time: %fs, test accuracy: 0.0f\n" %
+            print("\nPass %d, time consumed: %fs, test accuracy: 0.0f\n" %
                   (pass_id, time_consumed))
 
......
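One thing this fix deliberately leaves alone: `0.0f` in the format string is literal text, not a `%`-specifier, so only `%d` and `%f` consume the two supplied values and the test accuracy prints as a hard-coded `0.0f`. That matches the `# need to add test logic (kuke)` TODO. A quick demonstration of the formatting:

```python
import time

pass_id = 0
pass_start_time = time.time()
# ... one training pass would run here ...
time_consumed = time.time() - pass_start_time

# '%d' and '%f' consume pass_id and time_consumed; '0.0f' passes through
# verbatim as a placeholder until real test accuracy is computed
print("\nPass %d, time consumed: %fs, test accuracy: 0.0f\n" %
      (pass_id, time_consumed))
```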