diff --git a/fluid/PaddleCV/video_classification/.run_ce.sh b/fluid/PaddleCV/video_classification/.run_ce.sh
new file mode 100755
index 0000000000000000000000000000000000000000..9f80528916ac77a7d5ccd680ed9bceb9a06b608e
--- /dev/null
+++ b/fluid/PaddleCV/video_classification/.run_ce.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+
+export MKL_NUM_THREADS=1
+export OMP_NUM_THREADS=1
+
+
+cudaid=${video_classification:=0} # use GPU card 0 by default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+export FLAGS_fraction_of_gpu_memory_to_use=0.5
+FLAGS_benchmark=true python train.py --batch_size=16 --total_videos=9537 --class_dim=101 --num_epochs=1 --image_shape=3,224,224 --model_save_dir=output/ --with_mem_opt=True --lr_init=0.01 --num_layers=50 --seg_num=7 --enable_ce=True | python _ce.py
+export FLAGS_fraction_of_gpu_memory_to_use=0.92
+
+
+cudaid=${video_classification_4:=0,1,2,3} # use GPU cards 0-3 by default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+FLAGS_benchmark=true python train.py --batch_size=16 --total_videos=9537 --class_dim=101 --num_epochs=1 --image_shape=3,224,224 --model_save_dir=output/ --with_mem_opt=True --lr_init=0.01 --num_layers=50 --seg_num=7 --enable_ce=True | python _ce.py
+
+
+cudaid=${video_classification_8:=0,1,2,3,4,5,6,7} # use GPU cards 0-7 by default
+export CUDA_VISIBLE_DEVICES=$cudaid
+
+FLAGS_benchmark=true python train.py --batch_size=16 --total_videos=9537 --class_dim=101 --num_epochs=1 --image_shape=3,224,224 --model_save_dir=output/ --with_mem_opt=True --lr_init=0.01 --num_layers=50 --seg_num=7 --enable_ce=True | python _ce.py
diff --git a/fluid/PaddleCV/video_classification/__init__.py b/fluid/PaddleCV/video_classification/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/fluid/PaddleCV/video_classification/_ce.py b/fluid/PaddleCV/video_classification/_ce.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f392869c5f58907d672564e4f13e348394fe5f3
--- /dev/null
+++ b/fluid/PaddleCV/video_classification/_ce.py
@@ -0,0 +1,65 @@
+# This file is used only for the continuous evaluation (CE) test.
+
+import os
+import sys
+sys.path.append(os.environ['ceroot'])
+from kpi import CostKpi
+from kpi import DurationKpi
+
+
+each_pass_duration_card1_kpi = DurationKpi('each_pass_duration_card1', 0.08, 0, actived=True)
+train_loss_card1_kpi = CostKpi('train_loss_card1', 0.08, 0)
+each_pass_duration_card4_kpi = DurationKpi('each_pass_duration_card4', 0.08, 0, actived=True)
+train_loss_card4_kpi = CostKpi('train_loss_card4', 0.08, 0)
+each_pass_duration_card8_kpi = DurationKpi('each_pass_duration_card8', 0.08, 0, actived=True)
+train_loss_card8_kpi = CostKpi('train_loss_card8', 0.08, 0)
+
+tracking_kpis = [
+    each_pass_duration_card1_kpi,
+    train_loss_card1_kpi,
+    each_pass_duration_card4_kpi,
+    train_loss_card4_kpi,
+    each_pass_duration_card8_kpi,
+    train_loss_card8_kpi,
+]
+
+
+def parse_log(log):
+    '''
+    Parse KPI records from the training log.
+
+    Each KPI line in the log is tab-separated and has the form
+    kpis\t<kpi_name>\t<kpi_value>, for example:
+
+    "
+    kpis\ttrain_loss_card1\t1.0
+    kpis\teach_pass_duration_card1\t1.2
+    kpis\ttrain_loss_card4\t1.0
+    kpis\teach_pass_duration_card4\t1.2
+    "
+
+    Lines that do not match this format are ignored.
+    '''
+    for line in log.split('\n'):
+        fs = line.strip().split('\t')
+        print(fs)
+        if len(fs) == 3 and fs[0] == 'kpis':
+            kpi_name = fs[1]
+            kpi_value = float(fs[2])
+            yield kpi_name, kpi_value
+
+
+def log_to_ce(log):
+    kpi_tracker = {}
+    for kpi in tracking_kpis:
+        kpi_tracker[kpi.name] = kpi
+
+    for (kpi_name, kpi_value) in parse_log(log):
+        print(kpi_name, kpi_value)
+        kpi_tracker[kpi_name].add_record(kpi_value)
+        kpi_tracker[kpi_name].persist()
+
+
+if __name__ == '__main__':
+    log = sys.stdin.read()
+    log_to_ce(log)
diff --git a/fluid/PaddleCV/video_classification/train.py b/fluid/PaddleCV/video_classification/train.py
index e873cdb608ccfd83a8600e77b4837e2e52872549..67dd9715138c79c2ef2181f44550e8f237b33d3a 100644
--- a/fluid/PaddleCV/video_classification/train.py
+++ b/fluid/PaddleCV/video_classification/train.py
@@ -26,6 +26,8 @@ add_arg('model_save_dir', str, "output", "Model save directory.")
 add_arg('pretrained_model', str, None, "Whether to use pretrained model.")
 add_arg('total_videos', int, 9537, "Training video number.")
 add_arg('lr_init', float, 0.01, "Set initial learning rate.")
+add_arg('enable_ce', bool, False, "If set to True, enable the continuous evaluation job.")
+add_arg('num_devices', int, 1, "The number of devices used for training.")
 # yapf: enable
 
 
@@ -55,6 +57,11 @@ def train(args):
     acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
     acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
 
+    if args.enable_ce:
+        SEED = 102
+        fluid.default_main_program().random_seed = SEED
+        fluid.default_startup_program().random_seed = SEED
+
     # for test
     inference_program = fluid.default_main_program().clone(for_test=True)
 
@@ -92,6 +99,9 @@
     # reader
     train_reader = paddle.batch(
         reader.train(seg_num), batch_size=batch_size, drop_last=True)
+    if args.enable_ce:
+        train_reader = paddle.batch(reader.train(seg_num), batch_size=batch_size, drop_last=False)
+
     # test in single GPU
     test_reader = paddle.batch(reader.test(seg_num), batch_size=batch_size / 16)
     feeder = fluid.DataFeeder(place=place, feed_list=[image, label])
@@ -100,6 +110,7 @@
 
     fetch_list = [avg_cost.name, acc_top1.name, acc_top5.name]
 
+    total_time = 0
     # train
     for pass_id in range(num_epochs):
         train_info = [[], [], []]
@@ -109,6 +120,7 @@
             loss, acc1, acc5 = train_exe.run(fetch_list, feed=feeder.feed(data))
             t2 = time.time()
             period = t2 - t1
+            total_time += period
             loss = np.mean(np.array(loss))
             acc1 = np.mean(np.array(acc1))
             acc5 = np.mean(np.array(acc5))
@@ -169,6 +181,23 @@
                os.makedirs(model_path)
            fluid.io.save_persistables(exe, model_path)
 
+    if args.enable_ce:
+        gpu_num = get_cards(args)
+        epoch_idx = num_epochs
+        print("kpis\teach_pass_duration_card%s\t%s" %
+              (gpu_num, total_time / epoch_idx))
+        print("kpis\ttrain_loss_card%s\t%s" %
+              (gpu_num, train_loss))
+
+
+def get_cards(args):
+    if args.enable_ce:
+        cards = os.environ.get('CUDA_VISIBLE_DEVICES')
+        num = len(cards.split(","))
+        return num
+    else:
+        return args.num_devices
+
 
 def main():
     args = parser.parse_args()
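
For reference, a minimal sketch of how the CE pipeline above consumes KPI records, assuming the kpi package and the ceroot environment variable from the CE harness are available; the two records are made-up placeholder values, not real training output:

    # train.py prints tab-separated "kpis\t<name>\t<value>" lines when run with
    # --enable_ce=True; _ce.py reads them from stdin, exactly as piped in .run_ce.sh.
    printf 'kpis\ttrain_loss_card1\t2.35\nkpis\teach_pass_duration_card1\t41.7\n' | python _ce.py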