Unverified · Commit 4f457075 authored by Yang Zhang, committed by GitHub

Tweak command line scripts (#2517)

* Tweak command line scripts

enable fine-grained control of command-line flag parsing
switch to attribute-style access of config options (see the usage sketch below)

* Break down visualization function

decouple it from IO operations

* Move flag parsing out of `main()`

* Fix a bug where `None` is returned instead of `{}`

* Rename `save_xxx` to `output_xxx` in command line flags

the old names could be confusing, since checkpoints are stored in `save_dir`

* Support image file extensions in upper case
Parent 60a0e779
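A minimal usage sketch of the pattern this commit introduces (illustrative only, not part of the diff): each entry script builds an `ArgsParser`, registers only the flags it needs, merges `-o` overrides with `merge_config(FLAGS.opt)`, and reads config options as attributes.

```python
# Illustrative sketch only; the entry-script layout is an assumption, and the
# extra -f flag simply mirrors what the eval/train scripts below register.
from ppdet.core.workspace import load_config, merge_config
from ppdet.utils.cli import ArgsParser

if __name__ == '__main__':
    parser = ArgsParser()      # provides -c/--config and -o/--opt
    parser.add_argument(       # scripts register only the flags they need
        "-f", "--output_file", default=None, type=str,
        help="Evaluation file name, default to bbox.json and mask.json.")
    FLAGS = parser.parse_args()

    cfg = load_config(FLAGS.config)    # load the -c config file
    merge_config(FLAGS.opt)            # fold in -o key=value overrides
    print(cfg.use_gpu, cfg.metric)     # attribute access replaces cfg['use_gpu']
```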
@@ -12,11 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from argparse import ArgumentParser, RawDescriptionHelpFormatter, REMAINDER
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import yaml
__all__ = ['ColorTTY', 'ArgsParser']
class ColorTTY(object):
def __init__(self):
@@ -40,60 +41,39 @@ class ColorTTY(object):
return "[{}m{}".format(code, message)
def parse_args():
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
parser.add_argument("-c", "--config", help="configuration file to use")
parser.add_argument(
"-s",
"--savefile",
default=None,
type=str,
help="Save json file name for evaluation, if not set, default files are bbox.json and mask.json."
)
parser.add_argument(
"-r",
"--resume_checkpoint",
default=None,
type=str,
help="The checkpoint path for resuming training.")
parser.add_argument(
"--eval",
action='store_true',
default=False,
help="Whether perform evaluation in train")
parser.add_argument(
"--infer_dir",
type=str,
default=None,
help="Image directory path to perform inference.")
parser.add_argument(
"--infer_img",
type=str,
default=None,
help="Image path to perform inference, --infer-img has a higher priority than --image-dir")
parser.add_argument(
"-o", "--opt", nargs=REMAINDER, help="set configuration options")
args = parser.parse_args()
class ArgsParser(ArgumentParser):
def __init__(self):
super(ArgsParser, self).__init__(
formatter_class=RawDescriptionHelpFormatter)
self.add_argument("-c", "--config", help="configuration file to use")
self.add_argument("-o", "--opt", nargs='*',
help="set configuration options")
if args.config is None:
raise ValueError("Please specify --config=configure_file_path.")
def parse_args(self, argv=None):
args = super(ArgsParser, self).parse_args(argv)
assert args.config is not None, \
"Please specify --config=configure_file_path."
args.opt = self._parse_opt(args.opt)
return args
cli_config = {}
if 'opt' in vars(args) and args.opt is not None:
for s in args.opt:
def _parse_opt(self, opts):
config = {}
if not opts:
return config
for s in opts:
s = s.strip()
k, v = s.split('=')
if '.' not in k:
cli_config[k] = v
config[k] = v
else:
keys = k.split('.')
cli_config[keys[0]] = {}
cur = cli_config[keys[0]]
config[keys[0]] = {}
cur = config[keys[0]]
for idx, key in enumerate(keys[1:]):
if idx == len(keys) - 2:
cur[key] = yaml.load(v, Loader=yaml.Loader)
else:
cur[key] = {}
cur = cur[key]
args.cli_config = cli_config
return args
return config
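For orientation, a short sketch (not from the patch) of what the new `_parse_opt` yields: dotted keys become nested dicts with the leaf value run through `yaml.load`, plain keys keep their string value, and a missing or empty `-o` now produces `{}` rather than `None`, so `merge_config(FLAGS.opt)` always receives a dict. The config path below is a placeholder.

```python
# Sketch of ArgsParser option parsing; 'some_config.yml' is a placeholder path.
from ppdet.utils.cli import ArgsParser

parser = ArgsParser()
FLAGS = parser.parse_args(
    ['-c', 'some_config.yml',
     '-o', 'use_gpu=false', 'MaskHead.resolution=14'])
# FLAGS.opt == {'use_gpu': 'false',              # plain keys stay strings
#               'MaskHead': {'resolution': 14}}  # dotted leaves go through yaml.load

FLAGS = parser.parse_args(['-c', 'some_config.yml'])
# FLAGS.opt == {}   # empty dict instead of None (the bug fix noted above)
```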
@@ -48,7 +48,7 @@ def parse_fetches(fetches, prog=None, extra_keys=None):
v.persistable = True
keys.append(k)
values.append(v.name)
except:
except Exception:
pass
return keys, values, cls
@@ -88,23 +88,21 @@ def eval_run(exe, compile_program, pyreader, keys, values, cls):
return results
def eval_results(results, feed, args, cfg):
def eval_results(results, feed, metric, resolution, output_file=None):
"""Evaluation for evaluation program results"""
metric = cfg['metric']
if metric == 'COCO':
from ppdet.utils.coco_eval import bbox_eval, mask_eval
anno_file = getattr(feed.dataset, 'annotation', None)
with_background = getattr(feed, 'with_background', True)
savefile = 'bbox.json'
if args.savefile:
savefile = '{}_bbox.json'.format(args.savefile)
bbox_eval(results, anno_file, savefile, with_background)
output = 'bbox.json'
if output_file:
output = '{}_bbox.json'.format(output_file)
bbox_eval(results, anno_file, output, with_background)
if 'mask' in results[0]:
savefile = 'mask.json'
if args.savefile:
savefile = '{}_mask.json'.format(args.savefile)
mask_eval(results, anno_file, savefile,
cfg['MaskHead']['resolution'])
output = 'mask.json'
if output_file:
output = '{}_mask.json'.format(output_file)
mask_eval(results, anno_file, output, resolution)
else:
res = np.mean(results[-1]['accum_map'][0])
logger.info('Test mAP: {}'.format(res))
@@ -17,22 +17,16 @@ from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import numpy as np
import pycocotools.mask as mask_util
from PIL import Image, ImageDraw
from .colormap import colormap
logger = logging.getLogger(__name__)
__all__ = ['visualize_results']
SAVE_HOME = 'output'
def visualize_results(image_path,
def visualize_results(image,
catid2name,
threshold=0.5,
bbox_results=None,
@@ -40,19 +34,11 @@ def visualize_results(image_path,
"""
Visualize bbox and mask results
"""
if not os.path.exists(SAVE_HOME):
os.makedirs(SAVE_HOME)
logger.info("Image {} detect: ".format(image_path))
image = Image.open(image_path)
if mask_results:
image = draw_mask(image, mask_results, threshold)
if bbox_results:
image = draw_bbox(image, catid2name, bbox_results, threshold)
save_name = get_save_image_name(image_path)
logger.info("Detection results save in {}\n".format(save_name))
image.save(save_name)
return image
def draw_mask(image, segms, threshold, alpha=0.7):
@@ -62,7 +48,7 @@ def draw_mask(image, segms, threshold, alpha=0.7):
im_width, im_height = image.size
mask_color_id = 0
w_ratio = .4
image = np.array(image).astype('float32')
img_array = np.array(image).astype('float32')
for dt in np.array(segms):
segm, score = dt['segmentation'], dt['score']
if score < threshold:
@@ -74,10 +60,9 @@ def draw_mask(image, segms, threshold, alpha=0.7):
for c in range(3):
color_mask[c] = color_mask[c] * (1 - w_ratio) + w_ratio * 255
idx = np.nonzero(mask)
image[idx[0], idx[1], :] *= 1.0 - alpha
image[idx[0], idx[1], :] += alpha * color_mask
image = Image.fromarray(image.astype('uint8'))
return image
img_array[idx[0], idx[1], :] *= 1.0 - alpha
img_array[idx[0], idx[1], :] += alpha * color_mask
return Image.fromarray(img_array.astype('uint8'))
def draw_bbox(image, catid2name, bboxes, threshold):
@@ -101,17 +86,5 @@ def draw_bbox(image, catid2name, bboxes, threshold):
fill='red')
if image.mode == 'RGB':
draw.text((xmin, ymin), catid2name[catid], (255, 255, 0))
logger.info("\t {:15s} at {:25} score: {:.5f}".format(
catid2name[catid],
str(list(map(int, [xmin, ymin, xmax, ymax]))),
score))
return image
def get_save_image_name(image_path):
"""
Get save image name from source image path.
"""
image_name = image_path.split('/')[-1]
name, ext = os.path.splitext(image_name)
return os.path.join(SAVE_HOME, "{}".format(name)) + ext
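With the IO moved out of `visualize_results`, callers open and save the image themselves, as the infer script further down does. Below is a hedged sketch of that flow; the file name, category map, and bbox dict layout are assumptions (COCO-style boxes as `bbox2out` typically emits), not taken from the patch.

```python
# Illustrative sketch; the file name, category map and bbox dict layout are
# assumptions (COCO-style [x, y, w, h] boxes as produced by bbox2out()).
from PIL import Image
from ppdet.utils.visualizer import visualize_results

catid2name = {1: 'person'}                        # hypothetical category map
bbox_results = [{'image_id': 0, 'category_id': 1,
                 'bbox': [40., 30., 120., 200.],  # assumed [x, y, w, h]
                 'score': 0.92}]

image = Image.open('demo.jpg')                    # caller handles reading ...
image = visualize_results(image, catid2name, 0.5, bbox_results, None)
image.save('demo_vis.jpg')                        # ... and writing the result
```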
@@ -23,7 +23,7 @@ import paddle.fluid as fluid
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.cli import parse_args
from ppdet.utils.cli import ArgsParser
from ppdet.modeling.model_input import create_feeds
from ppdet.data.data_feed import create_reader
from ppdet.core.workspace import load_config, merge_config, create
@@ -38,28 +38,27 @@ def main():
"""
Main evaluate function
"""
args = parse_args()
cfg = load_config(args.config)
cfg = load_config(FLAGS.config)
if 'architecture' in cfg:
main_arch = cfg['architecture']
main_arch = cfg.architecture
else:
raise ValueError("'architecture' not specified in config file.")
merge_config(args.cli_config)
merge_config(FLAGS.opt)
if cfg['use_gpu']:
if cfg.use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
devices_num = int(os.environ.get('CPU_NUM',
multiprocessing.cpu_count()))
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
eval_feed = create(cfg['eval_feed'])
eval_feed = create(cfg.eval_feed)
# define executor
place = fluid.CUDAPlace(0) if cfg['use_gpu'] else fluid.CPUPlace()
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# 2. build program
@@ -88,11 +87,11 @@ def main():
# 5. Load model
exe.run(startup_prog)
if cfg['weights']:
checkpoint.load_pretrain(exe, eval_prog, cfg['weights'])
if 'weights' in cfg:
checkpoint.load_pretrain(exe, eval_prog, cfg.weights)
extra_keys = []
if cfg['metric'] == 'COCO':
if 'metric' in cfg and cfg.metric == 'COCO':
extra_keys = ['im_info', 'im_id', 'im_shape']
keys, values, cls = parse_fetches(fetches, eval_prog, extra_keys)
@@ -100,8 +99,18 @@ def main():
# 6. Run
results = eval_run(exe, compile_program, pyreader, keys, values, cls)
# Evaluation
eval_results(results, eval_feed, args, cfg)
eval_results(results, eval_feed, cfg.metric,
cfg.MaskHead.resolution, FLAGS.output_file)
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"-f",
"--output_file",
default=None,
type=str,
help="Evaluation file name, default to bbox.json and mask.json."
)
FLAGS = parser.parse_args()
main()
@@ -20,6 +20,7 @@ import os
import glob
import numpy as np
from PIL import Image
from paddle import fluid
@@ -28,7 +29,7 @@ from ppdet.modeling.model_input import create_feeds
from ppdet.data.data_feed import create_reader
from ppdet.utils.eval_utils import parse_fetches
from ppdet.utils.cli import parse_args
from ppdet.utils.cli import ArgsParser
from ppdet.utils.visualizer import visualize_results
import ppdet.utils.checkpoint as checkpoint
@@ -38,12 +39,23 @@ logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
def get_save_image_name(output_dir, image_path):
"""
Get save image name from source image path.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
image_name = image_path.split('/')[-1]
name, ext = os.path.splitext(image_name)
return os.path.join(output_dir, "{}".format(name)) + ext
def get_test_images(infer_dir, infer_img):
"""
Get image path list in TEST mode
"""
assert infer_img is not None or infer_dir is not None, \
"--infer-img or --infer-dir should be set"
"--infer-img or --infer-dir should be set"
images = []
# infer_img has a higher priority
@@ -53,37 +65,37 @@ def get_test_images(infer_dir, infer_img):
infer_dir = os.path.abspath(infer_dir)
assert os.path.isdir(infer_dir), \
"infer_dir {} is not a directory".format(infer_dir)
for fmt in ['jpg', 'jpeg', 'png', 'bmp']:
images.extend(glob.glob('{}/*.{}'.format(infer_dir, fmt)))
"infer_dir {} is not a directory".format(infer_dir)
exts = ['jpg', 'jpeg', 'png', 'bmp']
exts += [ext.upper() for ext in exts]
for ext in exts:
images.extend(glob.glob('{}/*.{}'.format(infer_dir, ext)))
assert len(images) > 0, "no image found in {} with " \
"extension {}".format(infer_dir, image_ext)
assert len(images) > 0, "no image found in {}".format(infer_dir)
logger.info("Found {} inference images in total.".format(len(images)))
return images
def main():
args = parse_args()
cfg = load_config(args.config)
cfg = load_config(FLAGS.config)
if 'architecture' in cfg:
main_arch = cfg['architecture']
main_arch = cfg.architecture
else:
raise ValueError("'architecture' not specified in config file.")
merge_config(args.cli_config)
merge_config(FLAGS.opt)
if 'test_feed' not in cfg:
test_feed = create(main_arch + 'TestFeed')
else:
test_feed = create(cfg['test_feed'])
test_feed = create(cfg.test_feed)
test_images = get_test_images(args.infer_dir, args.infer_img)
test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
test_feed.dataset.add_images(test_images)
place = fluid.CUDAPlace(0) if cfg['use_gpu'] else fluid.CPUPlace()
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
model = create(main_arch)
@@ -100,8 +112,8 @@ def main():
feeder = fluid.DataFeeder(place=place, feed_list=feed_vars.values())
exe.run(startup_prog)
if cfg['weights']:
checkpoint.load_checkpoint(exe, infer_prog, cfg['weights'])
if cfg.weights:
checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)
# parse infer fetches
extra_keys = []
@@ -110,9 +122,9 @@ def main():
keys, values, _ = parse_fetches(test_fetches, infer_prog, extra_keys)
# 6. Parse dataset category
if cfg['metric'] == 'COCO':
if cfg.metric == 'COCO':
from ppdet.utils.coco_eval import bbox2out, mask2out, get_category_info
if cfg['metric'] == "VOC":
if cfg.metric == "VOC":
# TODO(dengkaipeng): add VOC metric process
pass
@@ -134,21 +146,42 @@ def main():
im_id = int(res['im_id'][0])
image_path = imid2path[im_id]
if cfg['metric'] == 'COCO':
if cfg.metric == 'COCO':
bbox_results = None
mask_results = None
if 'bbox' in res:
bbox_results = bbox2out([res], clsid2catid)
if 'mask' in res:
mask_results = mask2out([res], clsid2catid,
cfg['MaskHead']['resolution'])
visualize_results(image_path, catid2name, 0.5, bbox_results,
mask_results)
if cfg['metric'] == "VOC":
cfg.MaskHead.resolution)
image = Image.open(image_path)
image = visualize_results(image, catid2name, 0.5,
bbox_results, mask_results)
save_name = get_save_image_name(FLAGS.output_dir, image_path)
logger.info("Detection bbox results save in {}".format(save_name))
image.save(save_name)
if cfg.metric == "VOC":
# TODO(dengkaipeng): add VOC metric process
pass
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"--infer_dir",
type=str,
default=None,
help="Directory for images to perform inference on.")
parser.add_argument(
"--infer_img",
type=str,
default=None,
help="Image path, has higher priority over --infer_dir")
parser.add_argument(
"--output_dir",
type=str,
default="output",
help="Directory for storing the output visualization files.")
FLAGS = parser.parse_args()
main()
@@ -29,7 +29,7 @@ from ppdet.data.data_feed import create_reader
from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
from ppdet.utils.stats import TrainingStats
from ppdet.utils.cli import parse_args
from ppdet.utils.cli import ArgsParser
import ppdet.utils.checkpoint as checkpoint
from ppdet.modeling.model_input import create_feeds
@@ -40,33 +40,33 @@ logger = logging.getLogger(__name__)
def main():
args = parse_args()
cfg = load_config(args.config)
cfg = load_config(FLAGS.config)
if 'architecture' in cfg:
main_arch = cfg['architecture']
main_arch = cfg.architecture
else:
raise ValueError("'architecture' not specified in config file.")
merge_config(args.cli_config)
merge_config(FLAGS.opt)
if cfg['use_gpu']:
if cfg.use_gpu:
devices_num = fluid.core.get_cuda_device_count()
else:
devices_num = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
devices_num = int(os.environ.get('CPU_NUM',
multiprocessing.cpu_count()))
if 'train_feed' not in cfg:
train_feed = create(main_arch + 'TrainFeed')
else:
train_feed = create(cfg['train_feed'])
train_feed = create(cfg.train_feed)
if args.eval:
if FLAGS.eval:
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
eval_feed = create(cfg['eval_feed'])
eval_feed = create(cfg.eval_feed)
place = fluid.CUDAPlace(0) if cfg['use_gpu'] else fluid.CPUPlace()
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
model = create(main_arch)
@@ -84,14 +84,14 @@ def main():
optimizer = optim_builder(lr)
optimizer.minimize(loss)
train_reader = create_reader(train_feed, cfg['max_iters'] * devices_num)
train_reader = create_reader(train_feed, cfg.max_iters * devices_num)
train_pyreader.decorate_sample_list_generator(train_reader, place)
# parse train fetches
train_keys, train_values, _ = parse_fetches(train_fetches)
train_values.append(lr)
if args.eval:
if FLAGS.eval:
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
@@ -103,7 +103,7 @@ def main():
eval_pyreader.decorate_sample_list_generator(eval_reader, place)
# parse train fetches
extra_keys = ['im_info', 'im_id'] if cfg['metric'] == 'COCO' else []
extra_keys = ['im_info', 'im_id'] if cfg.metric == 'COCO' else []
eval_keys, eval_values, eval_cls = parse_fetches(fetches, eval_prog,
extra_keys)
@@ -116,27 +116,27 @@ def main():
train_compile_program = fluid.compiler.CompiledProgram(
train_prog).with_data_parallel(
loss_name=loss.name, build_strategy=build_strategy)
if args.eval:
if FLAGS.eval:
eval_compile_program = fluid.compiler.CompiledProgram(eval_prog)
exe.run(startup_prog)
freeze_bn = getattr(model.backbone, 'freeze_norm', False)
if args.resume_checkpoint:
checkpoint.load_checkpoint(exe, train_prog, args.resume_checkpoint)
elif cfg['pretrain_weights'] and freeze_bn:
checkpoint.load_and_fusebn(exe, train_prog, cfg['pretrain_weights'])
elif cfg['pretrain_weights']:
checkpoint.load_pretrain(exe, train_prog, cfg['pretrain_weights'])
train_stats = TrainingStats(cfg['log_smooth_window'], train_keys)
if FLAGS.resume_checkpoint:
checkpoint.load_checkpoint(exe, train_prog, FLAGS.resume_checkpoint)
elif cfg.pretrain_weights and freeze_bn:
checkpoint.load_and_fusebn(exe, train_prog, cfg.pretrain_weights)
elif cfg.pretrain_weights:
checkpoint.load_pretrain(exe, train_prog, cfg.pretrain_weights)
train_stats = TrainingStats(cfg.log_smooth_window, train_keys)
train_pyreader.start()
start_time = time.time()
end_time = time.time()
cfg_name = os.path.basename(args.config).split('.')[0]
save_dir = os.path.join(cfg['save_dir'], cfg_name)
for it in range(cfg['max_iters']):
cfg_name = os.path.basename(FLAGS.config).split('.')[0]
save_dir = os.path.join(cfg.save_dir, cfg_name)
for it in range(cfg.max_iters):
start_time = end_time
end_time = time.time()
outs = exe.run(train_compile_program, fetch_list=train_values)
@@ -147,19 +147,40 @@ def main():
it, np.mean(outs[-1]), logs, end_time - start_time)
logger.info(strs)
if it > 0 and it % cfg['snapshot_iter'] == 0:
if it > 0 and it % cfg.snapshot_iter == 0:
checkpoint.save(exe, train_prog, os.path.join(save_dir, str(it)))
if args.eval:
if FLAGS.eval:
# Run evaluation
results = eval_run(exe, eval_compile_program, eval_pyreader,
eval_keys, eval_values, eval_cls)
# Evaluation
eval_results(results, eval_feed, args, cfg)
eval_results(results, eval_feed, cfg.metric,
cfg.MaskHead.resolution, FLAGS.output_file)
checkpoint.save(exe, train_prog, os.path.join(save_dir, "model_final"))
train_pyreader.reset()
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"-r",
"--resume_checkpoint",
default=None,
type=str,
help="Checkpoint path for resuming training.")
parser.add_argument(
"--eval",
action='store_true',
default=False,
help="Whether to perform evaluation in train")
parser.add_argument(
"-f",
"--output_file",
default=None,
type=str,
help="Evaluation file name, default to bbox.json and mask.json."
)
FLAGS = parser.parse_args()
main()