Commit a52efec3 authored by littletomatodonkey

fix train.py

Parent 093818a9
@@ -23,7 +23,7 @@ logging.basicConfig(
 def time_zone(sec, fmt):
-    real_time = datetime.datetime.now() + datetime.timedelta(hours=8)
+    real_time = datetime.datetime.now()
     return real_time.timetuple()
...
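This hunk drops the hard-coded UTC+8 offset, so log timestamps follow the machine's local clock instead of always printing Beijing time. For context, a minimal self-contained sketch of how such a converter is typically installed, assuming (as PaddleClas's logger utility appears to do) that it is assigned to logging.Formatter.converter. The odd (sec, fmt) signature works because a plain function assigned onto the class becomes a method: the first parameter receives the formatter instance and the second the record timestamp, both unused here.

import datetime
import logging

def time_zone(sec, fmt):
    # Local wall-clock time; the removed "+ datetime.timedelta(hours=8)"
    # had pinned every timestamp to UTC+8 regardless of the host's zone.
    real_time = datetime.datetime.now()
    return real_time.timetuple()

# Formatter calls self.converter(record.created) to obtain a struct_time
# before applying the date format string.
logging.Formatter.converter = time_zone
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
logging.info("timestamps now follow the local clock")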
...
@@ -13,12 +13,13 @@
 # limitations under the License.
 from __future__ import absolute_import
-import program
-from ppcls.utils import logger
-from ppcls.utils.save_load import init_model, save_model
-from ppcls.utils.config import get_config
-from ppcls.data import Reader
-import paddle.fluid as fluid
+from paddle.distributed import ParallelEnv
+import paddle
+
+from ppcls.data import Reader
+from ppcls.utils.config import get_config
+from ppcls.utils.save_load import init_model, save_model
+from ppcls.utils import logger
+import program
 from __future__ import division
 from __future__ import print_function
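The import hunk replaces the monolithic paddle.fluid namespace with the paddle 2.x entry points used in the body below. A minimal sketch of the device-selection idiom these imports enable; use_gpu is a hypothetical flag standing in for config.get("use_gpu", True):

import paddle
from paddle.distributed import ParallelEnv

use_gpu = True  # hypothetical flag; train.py reads it from the config
if use_gpu:
    # Under multi-process training each worker binds to the GPU that the
    # distributed launcher assigned to it.
    place = paddle.CUDAPlace(ParallelEnv().dev_id)
else:
    place = paddle.CPUPlace()
paddle.disable_static(place)  # enter dygraph (imperative) mode on that device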
@@ -53,69 +54,66 @@ def main(args):
     # assign the place
     use_gpu = config.get("use_gpu", True)
     if use_gpu:
-        gpu_id = fluid.dygraph.ParallelEnv().dev_id
-        place = fluid.CUDAPlace(gpu_id)
+        gpu_id = ParallelEnv().dev_id
+        place = paddle.CUDAPlace(gpu_id)
     else:
-        place = fluid.CPUPlace()
+        place = paddle.CPUPlace()
 
     use_data_parallel = int(os.getenv("PADDLE_TRAINERS_NUM", 1)) != 1
     config["use_data_parallel"] = use_data_parallel
-    with fluid.dygraph.guard(place):
-        net = program.create_model(config.ARCHITECTURE, config.classes_num)
-        optimizer = program.create_optimizer(
-            config, parameter_list=net.parameters())
+    paddle.disable_static(place)
 
-        if config["use_data_parallel"]:
-            strategy = fluid.dygraph.parallel.prepare_context()
-            net = fluid.dygraph.parallel.DataParallel(net, strategy)
+    net = program.create_model(config.ARCHITECTURE, config.classes_num)
 
-        # load model from checkpoint or pretrained model
-        init_model(config, net, optimizer)
+    optimizer = program.create_optimizer(
+        config, parameter_list=net.parameters())
 
-        train_dataloader = program.create_dataloader()
-        train_reader = Reader(config, 'train')()
-        train_dataloader.set_sample_list_generator(train_reader, place)
+    if config["use_data_parallel"]:
+        strategy = paddle.distributed.init_parallel_env()
+        net = paddle.DataParallel(net, strategy)
 
-        if config.validate:
-            valid_dataloader = program.create_dataloader()
-            valid_reader = Reader(config, 'valid')()
-            valid_dataloader.set_sample_list_generator(valid_reader, place)
+    # load model from checkpoint or pretrained model
+    init_model(config, net, optimizer)
 
-        best_top1_acc = 0.0  # best top1 acc record
-        for epoch_id in range(config.epochs):
-            net.train()
-            # 1. train with train dataset
-            program.run(train_dataloader, config, net, optimizer, epoch_id,
-                        'train')
+    train_dataloader = program.create_dataloader()
+    train_reader = Reader(config, 'train')()
+    train_dataloader.set_sample_list_generator(train_reader, place)
 
-            if not config["use_data_parallel"] or fluid.dygraph.parallel.Env(
-            ).local_rank == 0:
-                # 2. validate with validate dataset
-                if config.validate and epoch_id % config.valid_interval == 0:
-                    net.eval()
-                    top1_acc = program.run(valid_dataloader, config, net, None,
-                                           epoch_id, 'valid')
-                    if top1_acc > best_top1_acc:
-                        best_top1_acc = top1_acc
-                        message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
-                            best_top1_acc, epoch_id)
-                        logger.info("{:s}".format(
-                            logger.coloring(message, "RED")))
-                        if epoch_id % config.save_interval == 0:
-                            model_path = os.path.join(
-                                config.model_save_dir,
-                                config.ARCHITECTURE["name"])
-                            save_model(net, optimizer, model_path,
-                                       "best_model")
+    if config.validate:
+        valid_dataloader = program.create_dataloader()
+        valid_reader = Reader(config, 'valid')()
+        valid_dataloader.set_sample_list_generator(valid_reader, place)
 
-                # 3. save the persistable model
-                if epoch_id % config.save_interval == 0:
-                    model_path = os.path.join(config.model_save_dir,
-                                              config.ARCHITECTURE["name"])
-                    save_model(net, optimizer, model_path, epoch_id)
+    best_top1_acc = 0.0  # best top1 acc record
+    for epoch_id in range(config.epochs):
+        net.train()
+        # 1. train with train dataset
+        program.run(train_dataloader, config, net, optimizer, epoch_id,
+                    'train')
+
+        if not config["use_data_parallel"] or ParallelEnv().local_rank == 0:
+            # 2. validate with validate dataset
+            if config.validate and epoch_id % config.valid_interval == 0:
+                net.eval()
+                top1_acc = program.run(valid_dataloader, config, net, None,
+                                       epoch_id, 'valid')
+                if top1_acc > best_top1_acc:
+                    best_top1_acc = top1_acc
+                    message = "The best top1 acc {:.5f}, in epoch: {:d}".format(
+                        best_top1_acc, epoch_id)
+                    logger.info("{:s}".format(logger.coloring(message, "RED")))
+                    if epoch_id % config.save_interval == 0:
+                        model_path = os.path.join(config.model_save_dir,
+                                                  config.ARCHITECTURE["name"])
+                        save_model(net, optimizer, model_path, "best_model")
+
+            # 3. save the persistable model
+            if epoch_id % config.save_interval == 0:
+                model_path = os.path.join(config.model_save_dir,
+                                          config.ARCHITECTURE["name"])
+                save_model(net, optimizer, model_path, epoch_id)
 
 if __name__ == '__main__':
...
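Beyond the namespace renames, the substantive change in this hunk is how the data-parallel context is created: fluid.dygraph.parallel.prepare_context() plus fluid.dygraph.parallel.DataParallel give way to paddle.distributed.init_parallel_env() plus the top-level paddle.DataParallel, and the rank check moves from fluid.dygraph.parallel.Env() to ParallelEnv(). A condensed sketch of the pattern as this commit writes it (the 2.0-beta API; build_model is a hypothetical stand-in for program.create_model):

import os

import paddle
from paddle.distributed import ParallelEnv

def wrap_model(build_model):
    # One process per GPU; the distributed launcher sets PADDLE_TRAINERS_NUM.
    use_data_parallel = int(os.getenv("PADDLE_TRAINERS_NUM", 1)) != 1
    net = build_model()
    if use_data_parallel:
        # init_parallel_env sets up inter-process communication and, in the
        # API version this commit targets, returns the strategy object that
        # is handed to DataParallel.
        strategy = paddle.distributed.init_parallel_env()
        net = paddle.DataParallel(net, strategy)
    # Only the master process should validate and write checkpoints, which is
    # why the epoch loop above guards on local_rank == 0.
    is_master = not use_data_parallel or ParallelEnv().local_rank == 0
    return net, is_master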