diff --git a/ppdet/utils/check.py b/ppdet/utils/check.py
index a879bcffdde0e6876ce0c1d66c53365e9c9597eb..31f8bc205b49dbcf4746c2cbadb3b0b07bd4f2ba 100644
--- a/ppdet/utils/check.py
+++ b/ppdet/utils/check.py
@@ -23,7 +23,7 @@ import paddle.fluid as fluid
 import logging
 logger = logging.getLogger(__name__)
 
-__all__ = ['check_gpu', 'check_version']
+__all__ = ['check_gpu', 'check_version', 'check_config']
 
 
 def check_gpu(use_gpu):
@@ -59,3 +59,40 @@ def check_version():
     except Exception as e:
         logger.error(err)
         sys.exit(1)
+
+
+def check_config(cfg):
+    """
+    Check the correctness of the configuration file. Log an error and exit
+    when the config is not compliant.
+    """
+    err = "'{}' is not specified in the config file. Please set it."
+    check_list = ['architecture', 'num_classes']
+    try:
+        for var in check_list:
+            if var not in cfg:
+                logger.error(err.format(var))
+                sys.exit(1)
+    except Exception as e:
+        pass
+
+    if 'log_iter' not in cfg:
+        cfg.log_iter = 20
+
+    train_dataset = cfg['TrainReader']['dataset']
+    eval_dataset = cfg['EvalReader']['dataset']
+    test_dataset = cfg['TestReader']['dataset']
+    assert train_dataset.with_background == eval_dataset.with_background, \
+        "'with_background' of TrainReader does not match that of EvalReader."
+    assert train_dataset.with_background == test_dataset.with_background, \
+        "'with_background' of TrainReader does not match that of TestReader."
+
+    actual_num_classes = int(cfg.num_classes) - int(
+        train_dataset.with_background)
+    logger.info("The 'num_classes' (number of classes) you set is {} "
+                "and 'with_background' in 'dataset' is {}, so the actual "
+                "number of categories is {}."
+                .format(cfg.num_classes, train_dataset.with_background,
+                        actual_num_classes))
+
+    return cfg
diff --git a/slim/distillation/distill.py b/slim/distillation/distill.py
index a2b90122346b067673a86abfe18dd879d9fb4f5b..70113949b20a2a595cb8cf4756a467c569da2a3c 100644
--- a/slim/distillation/distill.py
+++ b/slim/distillation/distill.py
@@ -27,7 +27,7 @@ from ppdet.data.reader import create_reader
 from ppdet.utils.eval_utils import parse_fetches, eval_results, eval_run
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu
+from ppdet.utils.check import check_gpu, check_config
 import ppdet.utils.checkpoint as checkpoint
 
 import logging
@@ -125,18 +125,13 @@ def split_distill(split_output_names, weight):
 def main():
     env = os.environ
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-    if 'log_iter' not in cfg:
-        cfg.log_iter = 20
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
 
+    main_arch = cfg.architecture
+
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
     else:
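The new check_config above centralizes validation that each entry point previously repeated inline: it requires 'architecture' and 'num_classes', defaults log_iter to 20, asserts that 'with_background' agrees across TrainReader, EvalReader and TestReader, and logs the resulting number of categories. Below is a minimal, illustrative exercise of that behavior; it is not part of this patch, and the AttrDict class is only a stand-in for the attribute-style config object that load_config normally returns (the YOLOv3/81-class values are hypothetical).

```python
# Illustrative only: exercise check_config() with a hand-built config.
# AttrDict mimics ppdet's attribute-style config; it is not ppdet code.
from ppdet.utils.check import check_config


class AttrDict(dict):
    """Dict with attribute access, standing in for the object load_config returns."""
    __getattr__ = dict.__getitem__
    __setattr__ = dict.__setitem__


dataset = AttrDict(with_background=True)
cfg = AttrDict(
    architecture='YOLOv3',
    num_classes=81,  # 80 foreground categories + 1 background
    use_gpu=False,
    TrainReader=AttrDict(dataset=dataset),
    EvalReader=AttrDict(dataset=dataset),
    TestReader=AttrDict(dataset=dataset))

cfg = check_config(cfg)
print(cfg.log_iter)  # 20 -- filled in because the config did not set it
# Omitting 'architecture' or 'num_classes' logs an error and exits; a
# 'with_background' mismatch between readers trips the assertions instead.
```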
diff --git a/slim/extensions/distill_pruned_model/distill_pruned_model.py b/slim/extensions/distill_pruned_model/distill_pruned_model.py
index e5fa524a36b72f360326e0829875a09ee98a5b0b..c154bafb45d9a8987bd35c8918a021b2a37c5bb9 100644
--- a/slim/extensions/distill_pruned_model/distill_pruned_model.py
+++ b/slim/extensions/distill_pruned_model/distill_pruned_model.py
@@ -29,7 +29,7 @@ from ppdet.data.reader import create_reader
 from ppdet.utils.eval_utils import parse_fetches, eval_results, eval_run
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu
+from ppdet.utils.check import check_gpu, check_config
 import ppdet.utils.checkpoint as checkpoint
 
 import logging
@@ -113,18 +113,13 @@ def split_distill(split_output_names, weight):
 def main():
     env = os.environ
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-    if 'log_iter' not in cfg:
-        cfg.log_iter = 20
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
 
+    main_arch = cfg.architecture
+
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
     else:
diff --git a/slim/nas/train_nas.py b/slim/nas/train_nas.py
index 7cc17061823638f32d6fdf92f6aec5675c7f6065..d025cd9504ba5edea6b8e97614bc01e0c481a3b5 100644
--- a/slim/nas/train_nas.py
+++ b/slim/nas/train_nas.py
@@ -47,7 +47,7 @@ from ppdet.utils import dist_utils
 from ppdet.utils.eval_utils import parse_fetches, eval_run
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 import ppdet.utils.checkpoint as checkpoint
 from paddleslim.analysis import flops, TableLatencyEvaluator
 from paddleslim.nas import SANAS
@@ -209,21 +209,15 @@ def main():
     np.random.seed(local_seed)
 
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
-    if 'log_iter' not in cfg:
-        cfg.log_iter = 20
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
     else:
diff --git a/slim/prune/eval.py b/slim/prune/eval.py
index ad4ceff77312d144546b58fe29840610ea65ef42..b5d685c1d92baf97a878d3ada80db9ac12d68da1 100644
--- a/slim/prune/eval.py
+++ b/slim/prune/eval.py
@@ -37,7 +37,7 @@ from paddleslim.analysis import flops
 
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
 import ppdet.utils.checkpoint as checkpoint
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 
 from ppdet.data.reader import create_reader
 
@@ -55,17 +55,15 @@ def main():
     Main evaluate function
     """
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
 
     # define executor
diff --git a/slim/prune/export_model.py b/slim/prune/export_model.py
index bb5863bc437b444c2c0ce1b9803b67edc6755ebc..1d986b48da3c285b4c003d3bbbe95702bb8e8082 100644
--- a/slim/prune/export_model.py
+++ b/slim/prune/export_model.py
@@ -23,6 +23,7 @@ from paddle import fluid
 from ppdet.core.workspace import load_config, merge_config, create
 from ppdet.utils.cli import ArgsParser
 import ppdet.utils.checkpoint as checkpoint
+from ppdet.utils.check import check_config
 
 from paddleslim.prune import Pruner
 from paddleslim.analysis import flops
@@ -75,13 +76,10 @@ def save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog):
 
 def main():
     cfg = load_config(FLAGS.config)
-
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
+
+    main_arch = cfg.architecture
 
     # Use CPU for exporting inference model instead of GPU
     place = fluid.CPUPlace()
diff --git a/slim/prune/prune.py b/slim/prune/prune.py
index 989b093eeeb591588fec99e611140b0d2ad597ab..9c0d2bf24e02b311796630604750ddc08809b339 100644
--- a/slim/prune/prune.py
+++ b/slim/prune/prune.py
@@ -27,12 +27,11 @@ from paddle import fluid
 from ppdet.experimental import mixed_precision_context
 from ppdet.core.workspace import load_config, merge_config, create
 from ppdet.data.reader import create_reader
-from ppdet.utils.cli import print_total_cfg
 from ppdet.utils import dist_utils
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 import ppdet.utils.checkpoint as checkpoint
 
 import logging
@@ -52,22 +51,14 @@ def main():
     np.random.seed(local_seed)
 
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
-    if 'log_iter' not in cfg:
-        cfg.log_iter = 20
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
-    if not FLAGS.dist or trainer_id == 0:
-        print_total_cfg(cfg)
+
+    main_arch = cfg.architecture
 
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
diff --git a/slim/quantization/eval.py b/slim/quantization/eval.py
index 8812a8f4a0b53b230a9237abf9cf96e42fed1c81..d95c17f9d8dbb2dc369c7066987e974240d4087d 100644
--- a/slim/quantization/eval.py
+++ b/slim/quantization/eval.py
@@ -23,7 +23,7 @@ import paddle.fluid as fluid
 
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
 import ppdet.utils.checkpoint as checkpoint
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 
 from ppdet.data.reader import create_reader
 
@@ -44,17 +44,15 @@ def main():
     Main evaluate function
     """
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     # define executor
     place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
diff --git a/slim/quantization/export_model.py b/slim/quantization/export_model.py
index 32a32584779e79ec1645a16200e9df36a5b9fd1f..07f618ba36be1b0211aa7c7cf44b941d6bb3c4f2 100644
--- a/slim/quantization/export_model.py
+++ b/slim/quantization/export_model.py
@@ -23,6 +23,7 @@ from paddle import fluid
 from ppdet.core.workspace import load_config, merge_config, create
 from ppdet.utils.cli import ArgsParser
 import ppdet.utils.checkpoint as checkpoint
+from ppdet.utils.check import check_config
 from tools.export_model import prune_feed_vars
 
 import logging
@@ -50,13 +51,10 @@ def save_infer_model(save_dir, exe, feed_vars, test_fetches, infer_prog):
 
 def main():
     cfg = load_config(FLAGS.config)
-
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
+
+    main_arch = cfg.architecture
 
     # Use CPU for exporting inference model instead of GPU
     place = fluid.CPUPlace()
diff --git a/slim/quantization/infer.py b/slim/quantization/infer.py
index ed675e2f11905db6b23c45728630c4ec9ffe244f..e126029e8760ad8ab3c9d9eef327d47ae13a931d 100644
--- a/slim/quantization/infer.py
+++ b/slim/quantization/infer.py
@@ -29,7 +29,7 @@ from ppdet.core.workspace import load_config, merge_config, create
 
 from ppdet.utils.eval_utils import parse_fetches
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 from ppdet.utils.visualizer import visualize_results
 import ppdet.utils.checkpoint as checkpoint
 
@@ -44,19 +44,15 @@ from paddleslim.quant import quant_aware, convert
 
 def main():
     cfg = load_config(FLAGS.config)
-
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     dataset = cfg.TestReader['dataset']
 
     test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
diff --git a/slim/quantization/train.py b/slim/quantization/train.py
index 429a92b083a1e5227481602c9bb0759bf71225c4..f0a84e7c218c3ed0c3ccf78cb7b21b361da109d6 100644
--- a/slim/quantization/train.py
+++ b/slim/quantization/train.py
@@ -29,12 +29,11 @@ from paddle import fluid
 
 from ppdet.core.workspace import load_config, merge_config, create
 from ppdet.data.reader import create_reader
-from ppdet.utils.cli import print_total_cfg
 from ppdet.utils import dist_utils
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 import ppdet.utils.checkpoint as checkpoint
 from paddleslim.quant import quant_aware, convert
 import logging
@@ -73,22 +72,14 @@ def main():
     np.random.seed(local_seed)
 
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
-    if 'log_iter' not in cfg:
-        cfg.log_iter = 20
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
-    if not FLAGS.dist or trainer_id == 0:
-        print_total_cfg(cfg)
+
+    main_arch = cfg.architecture
 
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
diff --git a/slim/sensitive/sensitive.py b/slim/sensitive/sensitive.py
index 44c4e1ef1609c1de6775ef6f0e016d2f018987e9..495b5082ff9a93bed28085c29f241ba674d3350b 100644
--- a/slim/sensitive/sensitive.py
+++ b/slim/sensitive/sensitive.py
@@ -41,12 +41,11 @@ from ppdet.core.workspace import load_config, merge_config, create
 
 from ppdet.data.reader import create_reader
-from ppdet.utils.cli import print_total_cfg
 from ppdet.utils import dist_utils
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 import ppdet.utils.checkpoint as checkpoint
 
 from paddleslim.prune import sensitivity
 import logging
@@ -60,12 +59,10 @@ def main():
 
     print("FLAGS.config: {}".format(FLAGS.config))
     cfg = load_config(FLAGS.config)
-    assert 'architecture' in cfg
-    main_arch = cfg.architecture
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
 
-    print_total_cfg(cfg)
+    main_arch = cfg.architecture
 
     place = fluid.CUDAPlace(0)
     exe = fluid.Executor(place)
diff --git a/tools/eval.py b/tools/eval.py
index 2feb5150fbb1c788cf743f63abde7f4c09b91174..b44ce67b4af0e292a58f22353edbdb4dd666d4a1 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -35,7 +35,7 @@ import paddle.fluid as fluid
 
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results, json_eval_results
 import ppdet.utils.checkpoint as checkpoint
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 
 from ppdet.data.reader import create_reader
 
@@ -53,17 +53,15 @@ def main():
     Main evaluate function
     """
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     multi_scale_test = getattr(cfg, 'MultiScaleTEST', None)
 
     # define executor
diff --git a/tools/export_model.py b/tools/export_model.py
index f92031f6b789ecbbc2a14d3d05857bd5e8f7a425..7045d122149acfe243607b3fad0b8eb7a27fb032 100644
--- a/tools/export_model.py
+++ b/tools/export_model.py
@@ -23,6 +23,7 @@ from paddle import fluid
 from ppdet.core.workspace import load_config, merge_config, create
 from ppdet.utils.cli import ArgsParser
 import ppdet.utils.checkpoint as checkpoint
+from ppdet.utils.check import check_config
 import yaml
 import logging
 from collections import OrderedDict
@@ -166,13 +167,10 @@ def save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog):
 
 def main():
     cfg = load_config(FLAGS.config)
-
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
+    check_config(cfg)
+
+    main_arch = cfg.architecture
 
     # Use CPU for exporting inference model instead of GPU
     place = fluid.CPUPlace()
diff --git a/tools/face_eval.py b/tools/face_eval.py
index 09580cfe3db2769e00bd2e296623e16308e04c2a..05642efdfddf161a8664610ffa5428fc87a07cc1 100644
--- a/tools/face_eval.py
+++ b/tools/face_eval.py
@@ -210,16 +210,13 @@ def main():
     Main evaluate function
     """
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
 
+    main_arch = cfg.architecture
+
     # define executor
     place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
     exe = fluid.Executor(place)
diff --git a/tools/infer.py b/tools/infer.py
index a81413b820ec9457e64b8d6f568d2bbd58fd3455..fdc370e53e99a1b8cfe4e4bd02d0b3784ad632ba 100644
--- a/tools/infer.py
+++ b/tools/infer.py
@@ -41,7 +41,7 @@ from ppdet.core.workspace import load_config, merge_config, create
 
 from ppdet.utils.eval_utils import parse_fetches
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 from ppdet.utils.visualizer import visualize_results
 import ppdet.utils.checkpoint as checkpoint
 
@@ -98,18 +98,15 @@ def get_test_images(infer_dir, infer_img):
 
 def main():
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     dataset = cfg.TestReader['dataset']
 
     test_images = get_test_images(FLAGS.infer_dir, FLAGS.infer_img)
diff --git a/tools/train.py b/tools/train.py
index e2d21cf80b11ed0fb19e29d9709f87e68607f641..725b84f92f5742fda5036b0077c3ca00c994957a 100644
--- a/tools/train.py
+++ b/tools/train.py
@@ -49,7 +49,7 @@ from ppdet.utils import dist_utils
 from ppdet.utils.eval_utils import parse_fetches, eval_run, eval_results
 from ppdet.utils.stats import TrainingStats
 from ppdet.utils.cli import ArgsParser
-from ppdet.utils.check import check_gpu, check_version
+from ppdet.utils.check import check_gpu, check_version, check_config
 import ppdet.utils.checkpoint as checkpoint
 
 import logging
@@ -72,21 +72,15 @@ def main():
     np.random.seed(0)
 
     cfg = load_config(FLAGS.config)
-    if 'architecture' in cfg:
-        main_arch = cfg.architecture
-    else:
-        raise ValueError("'architecture' not specified in config file.")
-
     merge_config(FLAGS.opt)
-
-    if 'log_iter' not in cfg:
-        cfg.log_iter = 20
-
+    check_config(cfg)
     # check if set use_gpu=True in paddlepaddle cpu version
     check_gpu(cfg.use_gpu)
     # check if paddlepaddle version is satisfied
     check_version()
 
+    main_arch = cfg.architecture
+
     if cfg.use_gpu:
         devices_num = fluid.core.get_cuda_device_count()
     else:
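With check_config in place, every entry point touched above follows the same startup sequence: load the config, apply the -o overrides via merge_config so that validation sees the final values, run the checks, then read cfg.architecture. A minimal driver in that style is sketched below; it is illustrative rather than part of the patch, and the create(main_arch) call simply mirrors what the existing tools do next.

```python
# Illustrative sketch of the shared startup sequence, not part of the patch.
from ppdet.core.workspace import create, load_config, merge_config
from ppdet.utils.check import check_config, check_gpu, check_version
from ppdet.utils.cli import ArgsParser


def main():
    cfg = load_config(FLAGS.config)  # parse the YAML config
    merge_config(FLAGS.opt)          # apply -o key=value overrides first ...
    check_config(cfg)                # ... so validation sees the final values
    check_gpu(cfg.use_gpu)           # fail fast if use_gpu=True on a CPU-only build
    check_version()                  # fail fast on an unsupported PaddlePaddle
    main_arch = cfg.architecture     # safe: check_config guarantees the key exists
    model = create(main_arch)        # build the architecture, as the tools do


if __name__ == '__main__':
    FLAGS = ArgsParser().parse_args()
    main()
```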