diff --git a/BERT/.run_ce.sh b/BERT/.run_ce.sh
new file mode 100644
index 0000000000000000000000000000000000000000..c64780a986fd88e16895c74716805a56a8c6fd34
--- /dev/null
+++ b/BERT/.run_ce.sh
@@ -0,0 +1,39 @@
+export FLAGS_enable_parallel_graph=1
+export FLAGS_sync_nccl_allreduce=1
+
+BERT_BASE_PATH="chinese_L-12_H-768_A-12"
+TASK_NAME='xnli'
+DATA_PATH=data/xnli/XNLI-MT-1.0
+CKPT_PATH=pretrain_model
+
+train(){
+python -u run_classifier.py --task_name ${TASK_NAME} \
+                   --use_cuda true \
+                   --do_train true \
+                   --do_val false \
+                   --do_test false \
+                   --batch_size 8192 \
+                   --in_tokens true \
+                   --init_checkpoint ${CKPT_PATH}/${BERT_BASE_PATH}/ \
+                   --data_dir ${DATA_PATH} \
+                   --vocab_path ${CKPT_PATH}/${BERT_BASE_PATH}/vocab.txt \
+                   --checkpoints ${CKPT_PATH} \
+                   --save_steps 1000 \
+                   --weight_decay 0.01 \
+                   --warmup_proportion 0.0 \
+                   --validation_steps 25 \
+                   --epoch 1 \
+                   --max_seq_len 512 \
+                   --bert_config_path ${CKPT_PATH}/${BERT_BASE_PATH}/bert_config.json \
+                   --learning_rate 1e-4 \
+                   --skip_steps 10 \
+                   --random_seed 100 \
+                   --enable_ce \
+                   --shuffle false
+}
+
+export CUDA_VISIBLE_DEVICES=0
+train | python _ce.py
+
+export CUDA_VISIBLE_DEVICES=0,1,2,3
+train | python _ce.py
diff --git a/BERT/__init__.py b/BERT/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/BERT/_ce.py b/BERT/_ce.py
new file mode 100644
index 0000000000000000000000000000000000000000..24659d52b0338f2a37d276bd2a4a00004e15f0d4
--- /dev/null
+++ b/BERT/_ce.py
@@ -0,0 +1,69 @@
+#### This file is only used for the continuous evaluation (CE) test!
+
+import os
+import sys
+sys.path.insert(0, os.environ['ceroot'])
+#sys.path.append('.')
+from kpi import CostKpi, DurationKpi, AccKpi
+
+#### NOTE: kpi.py should be shared across models in some way!
+
+train_cost_xnli_card1_kpi = CostKpi('train_cost_xnli_card1', 0.002, 0, actived=True)
+train_acc_xnli_card1_kpi = AccKpi('train_acc_xnli_card1', 0.002, 0, actived=True)
+train_duration_xnli_card1_kpi = DurationKpi(
+    'train_duration_xnli_card1', 0.01, 0, actived=True)
+train_cost_xnli_card4_kpi = CostKpi('train_cost_xnli_card4', 0.002, 0, actived=True)
+train_acc_xnli_card4_kpi = AccKpi('train_acc_xnli_card4', 0.02, 0, actived=True)
+train_duration_xnli_card4_kpi = DurationKpi(
+    'train_duration_xnli_card4', 0.03, 0, actived=True)
+
+tracking_kpis = [
+    train_cost_xnli_card1_kpi,
+    train_acc_xnli_card1_kpi,
+    train_duration_xnli_card1_kpi,
+    train_cost_xnli_card4_kpi,
+    train_acc_xnli_card4_kpi,
+    train_duration_xnli_card4_kpi,
+]
+
+
+def parse_log(log):
+    '''
+    This method should be implemented by model developers.
+    The suggestion:
+    each line in the log should be key, value, for example:
+    "
+    train_cost\t1.0
+    test_cost\t1.0
+    train_cost\t1.0
+    train_cost\t1.0
+    train_acc\t1.2
+    "
+    '''
+    for line in log.split('\n'):
+        fs = line.strip().split('\t')
+        print(fs)
+        if len(fs) == 3 and fs[0] == 'kpis':
+            print("-----%s" % fs)
+            kpi_name = fs[1]
+            kpi_value = float(fs[2])
+            yield kpi_name, kpi_value
+
+
+def log_to_ce(log):
+    kpi_tracker = {}
+    for kpi in tracking_kpis:
+        kpi_tracker[kpi.name] = kpi
+
+    for (kpi_name, kpi_value) in parse_log(log):
+        print(kpi_name, kpi_value)
+        kpi_tracker[kpi_name].add_record(kpi_value)
+        kpi_tracker[kpi_name].persist()
+
+
+if __name__ == '__main__':
+    log = sys.stdin.read()
+    print("*****")
+    print(log)
+    print("****")
+    log_to_ce(log)
diff --git a/BERT/run_classifier.py b/BERT/run_classifier.py
index 473a8d51d77c0affbf36810f263cac899f5a6fa6..5e724d8a377ed454c0570ab5d43e3180e4d144b0 100644
--- a/BERT/run_classifier.py
+++ b/BERT/run_classifier.py
@@ -32,6 +32,7 @@ from model.classifier import create_model
 from optimization import optimization
 from utils.args import ArgumentGroup, print_arguments, check_cuda
 from utils.init import init_pretraining_params, init_checkpoint
+from utils.cards import get_cards
 import dist_utils
 
 num_trainers = int(os.environ.get('PADDLE_TRAINERS_NUM', 1))
@@ -87,6 +88,8 @@ run_type_g.add_arg("do_train", bool, True, "Whether to pe
 run_type_g.add_arg("do_val", bool, True, "Whether to perform evaluation on dev data set.")
 run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
 
+parser.add_argument("--enable_ce", action='store_true', help="The flag indicating whether to run the task for continuous evaluation.")
+
 args = parser.parse_args()
 # yapf: enable.
@@ -298,6 +301,7 @@
         total_cost, total_acc, total_num_seqs = [], [], []
         time_begin = time.time()
         throughput = []
+        ce_info = []
         while True:
             try:
                 # steps += 1
@@ -341,6 +345,7 @@
                         current_epoch, current_example, num_train_examples,
                         steps, np.sum(total_cost) / np.sum(total_num_seqs),
                         np.sum(total_acc) / np.sum(total_num_seqs))
+                    ce_info.append([np.sum(total_cost) / np.sum(total_num_seqs), np.sum(total_acc) / np.sum(total_num_seqs), used_time])
                     if steps > 0 :
                         throughput.append( args.skip_steps / used_time)
                         log_record = log_record + ", speed: %f steps/s" % (args.skip_steps / used_time)
@@ -388,6 +393,24 @@
                 fluid.io.save_persistables(exe, save_path, train_program)
                 train_pyreader.reset()
                 break
+        if args.enable_ce:
+            card_num = get_cards()
+            ce_cost = 0
+            ce_acc = 0
+            ce_time = 0
+            try:
+                ce_cost = ce_info[-2][0]
+                ce_acc = ce_info[-2][1]
+                ce_time = ce_info[-2][2]
+            except IndexError:
+                print("ce info error")
+            print("kpis\ttrain_duration_%s_card%s\t%s" %
+                  (args.task_name, card_num, ce_time))
+            print("kpis\ttrain_cost_%s_card%s\t%f" %
+                  (args.task_name, card_num, ce_cost))
+            print("kpis\ttrain_acc_%s_card%s\t%f" %
+                  (args.task_name, card_num, ce_acc))
+
     # final eval on dev set
     if args.do_val:
diff --git a/BERT/utils/cards.py b/BERT/utils/cards.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ba9aa6d2ee81eebfc8c02bdef5d50dff7d96f6e
--- /dev/null
+++ b/BERT/utils/cards.py
@@ -0,0 +1,28 @@
+# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import os
+
+def get_cards():
+    """
+    Return the number of GPU cards named in CUDA_VISIBLE_DEVICES (0 if unset).
+    """
+    num = 0
+    cards = os.environ.get('CUDA_VISIBLE_DEVICES', '')
+    if cards != '':
+        num = len(cards.split(","))
+    return num
+