Commit 19f9260b authored by Dang Qingqing

Update argument comments.

Parent f1789a58
@@ -18,17 +18,19 @@ add_arg('batch_size', int, 256, "Minibatch size.")
add_arg('num_layers', int, 50, "How many layers for SE-ResNeXt model.")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('parallel_exe', bool, True, "Whether to use ParallelExecutor to train or not.")
def train_paralle_do(args,
learning_rate,
batch_size,
num_passes,
init_model=None,
model_save_dir='model',
parallel=True,
use_nccl=True,
lr_strategy=None,
layers=50):
# yapf: enable
def train_parallel_do(args,
learning_rate,
batch_size,
num_passes,
init_model=None,
model_save_dir='model',
parallel=True,
use_nccl=True,
lr_strategy=None,
layers=50):
class_dim = 1000
image_shape = [3, 224, 224]
@@ -62,6 +64,8 @@ def train_paralle_do(args,
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
inference_program = fluid.default_main_program().clone(for_test=True)
if lr_strategy is None:
optimizer = fluid.optimizer.Momentum(
learning_rate=learning_rate,
@@ -76,12 +80,9 @@ def train_paralle_do(args,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
inference_program = fluid.default_main_program().clone(for_test=True)
opts = optimizer.minimize(avg_cost)
if args.with_mem_opt:
fluid.memory_optimize(fluid.default_main_program())
fluid.memory_optimize(inference_program)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
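The hunk above moves `inference_program = fluid.default_main_program().clone(for_test=True)` ahead of `optimizer.minimize(avg_cost)`, so the cloned program contains only forward ops. A minimal sketch of that ordering with the legacy `paddle.fluid` API (the helper name and its arguments are illustrative, not this script's exact code):

```python
import paddle.fluid as fluid

def build_train_and_test_programs(build_forward, learning_rate=0.1):
    # build_forward() is a hypothetical helper that adds the forward ops to
    # the default main program and returns the mean loss variable.
    avg_cost = build_forward()
    # Clone first: the test/inference program keeps only forward and eval ops.
    inference_program = fluid.default_main_program().clone(for_test=True)
    # Only afterwards append backward and optimizer ops to the training program.
    optimizer = fluid.optimizer.Momentum(
        learning_rate=learning_rate,
        momentum=0.9,
        regularization=fluid.regularizer.L2Decay(1e-4))
    optimizer.minimize(avg_cost)
    return fluid.default_main_program(), inference_program
```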
@@ -154,6 +155,7 @@ def train_paralle_do(args,
os.makedirs(model_path)
fluid.io.save_persistables(exe, model_path)
def train_parallel_exe(args,
learning_rate,
batch_size,
@@ -195,7 +197,6 @@ def train_parallel_exe(args,
if args.with_mem_opt:
fluid.memory_optimize(fluid.default_main_program())
fluid.memory_optimize(test_program)
place = fluid.CUDAPlace(0)
exe = fluid.Executor(place)
@@ -210,9 +211,7 @@ def train_parallel_exe(args,
train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
test_exe = fluid.ParallelExecutor(
use_cuda=True,
main_program=test_program,
share_vars_from=train_exe)
use_cuda=True, main_program=test_program, share_vars_from=train_exe)
fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
@@ -221,9 +220,8 @@ def train_parallel_exe(args,
test_info = [[], [], []]
for batch_id, data in enumerate(train_reader()):
t1 = time.time()
loss, acc1, acc5 = train_exe.run(
fetch_list,
feed_dict=feeder.feed(data))
loss, acc1, acc5 = train_exe.run(fetch_list,
feed_dict=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
@@ -245,9 +243,8 @@ def train_parallel_exe(args,
train_acc5 = np.array(train_info[2]).mean()
for data in test_reader():
t1 = time.time()
loss, acc1, acc5 = test_exe.run(
fetch_list,
feed_dict=feeder.feed(data))
loss, acc1, acc5 = test_exe.run(fetch_list,
feed_dict=feeder.feed(data))
t2 = time.time()
period = t2 - t1
loss = np.mean(np.array(loss))
@@ -281,8 +278,6 @@ def train_parallel_exe(args,
fluid.io.save_persistables(exe, model_path)
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
@@ -300,12 +295,13 @@ if __name__ == '__main__':
# layers: 50, 152
layers = args.num_layers
method = train_parallel_exe if args.parallel_exe else train_parallel_do
method(args,
learning_rate=0.1,
batch_size=batch_size,
num_passes=120,
init_model=None,
parallel=True,
use_nccl=True,
lr_strategy=lr_strategy,
layers=layers)
method(
args,
learning_rate=0.1,
batch_size=batch_size,
num_passes=120,
init_model=None,
parallel=True,
use_nccl=True,
lr_strategy=lr_strategy,
layers=layers)
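In the `train_parallel_exe` path, the calls reformatted above pair a training executor with a test executor that borrows its parameters. A hedged sketch of that pattern, assuming the legacy fluid `ParallelExecutor` API this script uses (the wrapper functions are illustrative):

```python
import numpy as np
import paddle.fluid as fluid

def build_parallel_executors(avg_cost, test_program):
    # The training executor owns the parameters; the test executor reuses
    # them through share_vars_from instead of keeping a second copy per GPU.
    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
    test_exe = fluid.ParallelExecutor(
        use_cuda=True, main_program=test_program, share_vars_from=train_exe)
    return train_exe, test_exe

def run_and_average(exe, fetch_list, feed):
    # ParallelExecutor.run returns one array per device for each fetched
    # variable; average across devices to get scalars for logging.
    results = exe.run(fetch_list, feed_dict=feed)
    return [np.mean(np.array(r)) for r in results]
```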
@@ -15,24 +15,21 @@ add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('dataset', str, 'pascalvoc', "coco or pascalvoc.")
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether use GPU.")
add_arg('data_dir', str, '', "The path to save model.")
add_arg('test_list', str, '', "The path to save model.")
add_arg('label_file', str, '', "Label file.")
add_arg('model_dir', str, '', "The path to save model.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('data_dir', str, '', "The data root path.")
add_arg('test_list', str, '', "The testing data lists.")
add_arg('label_file', str, '', "The label file, which save the real name and is only used for Pascal VOC.")
add_arg('model_dir', str, '', "The model path.")
add_arg('ap_version', str, '11point', "11point or integral")
add_arg('resize_h', int, 300, "resize image size")
add_arg('resize_w', int, 300, "resize image size")
add_arg('mean_value_B', float, 127.5, "mean value which will be subtracted") #123.68
add_arg('mean_value_G', float, 127.5, "mean value which will be subtracted") #116.78
add_arg('mean_value_R', float, 127.5, "mean value which will be subtracted") #103.94
# yapf: disable
add_arg('resize_h', int, 300, "The resized image height.")
add_arg('resize_w', int, 300, "The resized image width.")
add_arg('mean_value_B', float, 127.5, "mean value for B channel which will be subtracted") #123.68
add_arg('mean_value_G', float, 127.5, "mean value for G channel which will be subtracted") #116.78
add_arg('mean_value_R', float, 127.5, "mean value for R channel which will be subtracted") #103.94
# yapf: enable
def eval(args,
data_args,
test_list,
batch_size,
model_dir=None):
def eval(args, data_args, test_list, batch_size, model_dir=None):
image_shape = [3, data_args.resize_h, data_args.resize_w]
if data_args.dataset == 'coco':
num_classes = 81
@@ -50,8 +47,7 @@ def eval(args,
locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=0.45)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
box_var)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
loss = fluid.layers.reduce_sum(loss)
test_program = fluid.default_main_program().clone(for_test=True)
@@ -71,8 +67,10 @@ def eval(args,
#exe.run(fluid.default_startup_program())
if model_dir:
def if_exist(var):
return os.path.exists(os.path.join(model_dir, var.name))
fluid.io.load_vars(exe, model_dir, predicate=if_exist)
#fluid.io.load_persistables(exe, model_dir, main_program=test_program)
@@ -89,6 +87,7 @@ def eval(args,
fetch_list=[accum_map])
print("Test model {0}, map {1}".format(model_dir, test_map[0]))
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
@@ -99,8 +98,9 @@ if __name__ == '__main__':
resize_h=args.resize_h,
resize_w=args.resize_w,
mean_value=[args.mean_value_B, args.mean_value_G, args.mean_value_R])
eval(args,
test_list=args.test_list,
data_args=data_args,
batch_size=args.batch_size,
model_dir=args.model_dir)
eval(
args,
test_list=args.test_list,
data_args=data_args,
batch_size=args.batch_size,
model_dir=args.model_dir)
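Most of this commit rewrites the help strings passed to `add_arg`, a `functools.partial` wrapper around the repo's `add_arguments` utility. A minimal sketch of how such a helper can look (the actual utility module may differ in details):

```python
import argparse
import distutils.util
import functools

def add_arguments(argname, type, default, help, argparser):
    """Register one --argname option; bool flags accept true/false strings."""
    type = distutils.util.strtobool if type == bool else type
    argparser.add_argument(
        "--" + argname,
        default=default,
        type=type,
        help=help + " Default: %(default)s.")

parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
```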
@@ -329,18 +329,23 @@ def test(settings, file_list):
def infer(settings, image_path):
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
im_width, im_height = im.size
img = img.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)
img = np.array(img)
# HWC to CHW
if len(img.shape) == 3:
img = np.swapaxes(img, 1, 2)
img = np.swapaxes(img, 1, 0)
# RBG to BGR
img = img[[2, 1, 0], :, :]
img = img.astype('float32')
img -= settings.img_mean
img = img * 0.007843
def reader():
im = Image.open(image_path)
if im.mode == 'L':
im = im.convert('RGB')
im_width, im_height = im.size
img = img.resize((settings.resize_w, settings.resize_h),
Image.ANTIALIAS)
img = np.array(img)
# HWC to CHW
if len(img.shape) == 3:
img = np.swapaxes(img, 1, 2)
img = np.swapaxes(img, 1, 0)
# RBG to BGR
img = img[[2, 1, 0], :, :]
img = img.astype('float32')
img -= settings.img_mean
img = img * 0.007843
yield img
return reader
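The new `infer` reader above wraps the preprocessing in a generator: resize, HWC to CHW, RGB to BGR, per-channel mean subtraction, and a 0.007843 (roughly 1/127.5) scale. A standalone sketch of those steps (the `preprocess` name and the `settings` fields are assumptions; note the sketch resizes the opened image `im`, whereas the diffed reader reuses the name `img` before assigning it, which would raise a NameError as written):

```python
import numpy as np
from PIL import Image

def preprocess(image_path, settings):
    im = Image.open(image_path)
    if im.mode == 'L':                   # grayscale -> 3 channels
        im = im.convert('RGB')
    im = im.resize((settings.resize_w, settings.resize_h), Image.ANTIALIAS)
    img = np.array(im).astype('float32')
    if len(img.shape) == 3:              # HWC -> CHW
        img = img.transpose((2, 0, 1))
    img = img[[2, 1, 0], :, :]           # RGB -> BGR
    img -= settings.img_mean             # per-channel mean subtraction
    img *= 0.007843                      # scale to roughly [-1, 1]
    return img
```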
@@ -16,23 +16,23 @@ add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('learning_rate', float, 0.001, "Learning rate.")
add_arg('batch_size', int, 32, "Minibatch size.")
add_arg('num_passes', int, 120, "Epoch number.")
add_arg('num_passes', int, 120, "Epoch number.")
add_arg('parallel', bool, True, "Whether use parallel training.")
add_arg('use_gpu', bool, True, "Whether use GPU.")
add_arg('use_nccl', bool, False, "Whether use NCCL.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('use_nccl', bool, False, "Whether to use NCCL or not.")
add_arg('dataset', str, 'pascalvoc', "coco or pascalvoc.")
add_arg('model_save_dir', str, 'model', "The path to save model.")
add_arg('pretrained_model', str, 'pretrained/ssd_mobilenet_v1_coco/', "The init model path.")
add_arg('apply_distort', bool, True, "Whether to apply distort or not.")
add_arg('apply_expand', bool, True, "Whether to apply expand or not.")
add_arg('ap_version', str, '11point', "11point or integral")
add_arg('resize_h', int, 300, "resize image size")
add_arg('resize_w', int, 300, "resize image size")
add_arg('mean_value_B', float, 127.5, "mean value which will be subtracted") #123.68
add_arg('mean_value_G', float, 127.5, "mean value which will be subtracted") #116.78
add_arg('mean_value_R', float, 127.5, "mean value which will be subtracted") #103.94
add_arg('resize_h', int, 300, "The resized image height.")
add_arg('resize_w', int, 300, "The resized image width.")
add_arg('mean_value_B', float, 127.5, "mean value for B channel which will be subtracted") #123.68
add_arg('mean_value_G', float, 127.5, "mean value for G channel which will be subtracted") #116.78
add_arg('mean_value_R', float, 127.5, "mean value for R channel which will be subtracted") #103.94
add_arg('is_toy', int, 0, "Toy for quick debug, 0 means using all data, while n means using only n sample")
# yapf: disable
# yapf: enable
def parallel_do(args,
@@ -118,8 +118,10 @@ def parallel_do(args,
exe.run(fluid.default_startup_program())
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
train_reader = paddle.batch(
@@ -190,8 +192,7 @@ def parallel_exe(args,
locs, confs, box, box_var = mobile_net(num_classes, image, image_shape)
nmsed_out = fluid.layers.detection_output(
locs, confs, box, box_var, nms_threshold=0.45)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
box_var)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box, box_var)
loss = fluid.layers.reduce_sum(loss)
test_program = fluid.default_main_program().clone(for_test=True)
@@ -217,7 +218,10 @@ def parallel_exe(args,
elif data_args.dataset == 'pascalvoc':
epocs = 19200 / batch_size
boundaries = [epocs * 40, epocs * 60, epocs * 80, epocs * 100]
values = [learning_rate, learning_rate * 0.5, learning_rate * 0.25, learning_rate * 0.1, learning_rate * 0.01]
values = [
learning_rate, learning_rate * 0.5, learning_rate * 0.25,
learning_rate * 0.1, learning_rate * 0.01
]
optimizer = fluid.optimizer.RMSProp(
learning_rate=fluid.layers.piecewise_decay(boundaries, values),
regularization=fluid.regularizer.L2Decay(0.00005), )
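The `piecewise_decay` schedule built above keeps the learning rate constant between boundaries and lowers it after epochs 40, 60, 80, and 100. A small pure-Python sketch of the value it selects at a given global step (the batch size and base rate below are this script's defaults, used only for illustration):

```python
def piecewise_lr(step, boundaries, values):
    # Return values[i] for the first boundary the step has not yet reached,
    # and the final value once all boundaries are passed.
    for boundary, value in zip(boundaries, values):
        if step < boundary:
            return value
    return values[-1]

# epocs = 19200 / batch_size iterations per epoch on Pascal VOC.
epocs = 19200 // 32
boundaries = [epocs * 40, epocs * 60, epocs * 80, epocs * 100]
values = [0.001, 0.0005, 0.00025, 0.0001, 0.00001]
assert piecewise_lr(0, boundaries, values) == 0.001
assert piecewise_lr(epocs * 70, boundaries, values) == 0.00025
assert piecewise_lr(epocs * 200, boundaries, values) == 0.00001
```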
@@ -229,12 +233,14 @@ def parallel_exe(args,
exe.run(fluid.default_startup_program())
if pretrained_model:
def if_exist(var):
return os.path.exists(os.path.join(pretrained_model, var.name))
fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
train_exe = fluid.ParallelExecutor(use_cuda=args.use_gpu,
loss_name=loss.name)
train_exe = fluid.ParallelExecutor(
use_cuda=args.use_gpu, loss_name=loss.name)
train_reader = paddle.batch(
reader.train(data_args, train_file_list), batch_size=batch_size)
@@ -251,6 +257,7 @@ def parallel_exe(args,
fluid.io.save_persistables(exe, model_path)
best_map = 0.
def test(pass_id, best_map):
_, accum_map = map_eval.get_map_var()
map_eval.reset(exe)
@@ -273,7 +280,7 @@ def parallel_exe(args,
start_time = time.time()
if len(data) < devices_num: continue
loss_v, = train_exe.run(fetch_list=[loss.name],
feed_dict=feeder.feed(data))
feed_dict=feeder.feed(data))
end_time = time.time()
loss_v = np.mean(np.array(loss_v))
if batch_id % 20 == 0:
@@ -284,6 +291,7 @@ def parallel_exe(args,
save_model(str(pass_id))
print("Best test map {0}".format(best_map))
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
@@ -311,12 +319,13 @@ if __name__ == '__main__':
toy=args.is_toy)
#method = parallel_do
method = parallel_exe
method(args,
train_file_list=train_file_list,
val_file_list=val_file_list,
data_args=data_args,
learning_rate=args.learning_rate,
batch_size=args.batch_size,
num_passes=args.num_passes,
model_save_dir=model_save_dir,
pretrained_model=args.pretrained_model)
method(
args,
train_file_list=train_file_list,
val_file_list=val_file_list,
data_args=data_args,
learning_rate=args.learning_rate,
batch_size=args.batch_size,
num_passes=args.num_passes,
model_save_dir=model_save_dir,
pretrained_model=args.pretrained_model)
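Both SSD training paths warm-start from `pretrained_model` by loading only the variables that have a matching file on disk, as the `if_exist` predicate above shows. A hedged sketch of that pattern with the legacy fluid API (the wrapper function is illustrative):

```python
import os
import paddle.fluid as fluid

def load_pretrained(exe, pretrained_model):
    # Load only persistable variables with a matching file; anything without
    # one (e.g. a newly added detection head) keeps its freshly initialized value.
    def if_exist(var):
        return os.path.exists(os.path.join(pretrained_model, var.name))
    fluid.io.load_vars(exe, pretrained_model, predicate=if_exist)
```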