diff --git a/PaddleCV/Paddle3D/PointNet++/eval_cls.py b/PaddleCV/Paddle3D/PointNet++/eval_cls.py
index a25731a658b18ec8814b8521303a90b6f5dcf02b..ea9cfbef343840c3f6f40625172135ba811ae27b 100644
--- a/PaddleCV/Paddle3D/PointNet++/eval_cls.py
+++ b/PaddleCV/Paddle3D/PointNet++/eval_cls.py
@@ -55,7 +55,7 @@ def parse_args():
     parser.add_argument(
         '--num_points',
         type=int,
-        default=4096,
-        help='number of points in a sample, default: 4096')
+        default=2048,
+        help='number of points in a sample, default: 2048')
     parser.add_argument(
         '--num_classes',
@@ -101,7 +101,7 @@ def eval():
             eval_model.build_model()
             eval_feeds = eval_model.get_feeds()
             eval_outputs = eval_model.get_outputs()
-            eval_pyreader = eval_model.get_pyreader()
+            eval_loader = eval_model.get_loader()
     eval_prog = eval_prog.clone(True)

     eval_keys, eval_values = parse_outputs(eval_outputs)
@@ -109,21 +109,20 @@ def eval():
     exe = fluid.Executor(place)
     exe.run(startup)

-    assert os.path.exists(args.weights), "weights {} not exists.".format(args.weights)
-    def if_exist(var):
-        return os.path.exists(os.path.join(args.weights, var.name))
-    fluid.io.load_vars(exe, args.weights, eval_prog, predicate=if_exist)
+    assert os.path.exists("{}.pdparams".format(args.weights)), \
+            "Given resume weight {}.pdparams not exist.".format(args.weights)
+    fluid.load(eval_prog, args.weights, exe)

     eval_compile_prog = fluid.compiler.CompiledProgram(eval_prog)

     # get reader
     modelnet_reader = ModelNet40ClsReader(args.data_dir, mode='test')
     eval_reader = modelnet_reader.get_reader(args.batch_size, args.num_points)
-    eval_pyreader.decorate_sample_list_generator(eval_reader, place)
+    eval_loader.set_sample_list_generator(eval_reader, place)

     eval_stat = Stat()
     try:
-        eval_pyreader.start()
+        eval_loader.start()
         eval_iter = 0
         eval_periods = []
         while True:
@@ -141,7 +140,7 @@ def eval():
     except fluid.core.EOFException:
         logger.info("[EVAL] Eval finished, {}average time: {:.2f}".format(eval_stat.get_mean_log(), np.mean(eval_periods[1:])))
     finally:
-        eval_pyreader.reset()
+        eval_loader.reset()


 if __name__ == "__main__":
diff --git a/PaddleCV/Paddle3D/PointNet++/eval_seg.py b/PaddleCV/Paddle3D/PointNet++/eval_seg.py
index 56c257bb6dee2027a49d3abe48097bb7bfd4a610..027211dc833264c5274ff2440d23fd3d511c4f15 100644
--- a/PaddleCV/Paddle3D/PointNet++/eval_seg.py
+++ b/PaddleCV/Paddle3D/PointNet++/eval_seg.py
@@ -100,7 +100,7 @@ def eval():
             eval_model.build_model()
             eval_feeds = eval_model.get_feeds()
             eval_outputs = eval_model.get_outputs()
-            eval_pyreader = eval_model.get_pyreader()
+            eval_loader = eval_model.get_loader()
     eval_prog = eval_prog.clone(True)

     eval_keys, eval_values = parse_outputs(eval_outputs)
@@ -108,21 +108,20 @@ def eval():
     exe = fluid.Executor(place)
     exe.run(startup)

-    assert os.path.exists(args.weights), "weights {} not exists.".format(args.weights)
-    def if_exist(var):
-        return os.path.exists(os.path.join(args.weights, var.name))
-    fluid.io.load_vars(exe, args.weights, eval_prog, predicate=if_exist)
+    assert os.path.exists("{}.pdparams".format(args.weights)), \
+            "Given resume weight {}.pdparams not exist.".format(args.weights)
+    fluid.load(eval_prog, args.weights, exe)

     eval_compile_prog = fluid.compiler.CompiledProgram(eval_prog)

     # get reader
     indoor_reader = Indoor3DReader(args.data_dir)
     eval_reader = indoor_reader.get_reader(args.batch_size, args.num_points, mode='test')
-    eval_pyreader.decorate_sample_list_generator(eval_reader, place)
+    eval_loader.set_sample_list_generator(eval_reader, place)

     eval_stat = Stat()
     try:
-        eval_pyreader.start()
+        eval_loader.start()
         eval_iter = 0
         eval_periods = []
         while True:
@@ -140,7 +139,7 @@ def eval():
     except fluid.core.EOFException:
         logger.info("[EVAL] Eval finished, {}average time: {:.2f}".format(eval_stat.get_mean_log(), np.mean(eval_periods[1:])))
     finally:
-        eval_pyreader.reset()
+        eval_loader.reset()


 if __name__ == "__main__":
diff --git a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_cls.py b/PaddleCV/Paddle3D/PointNet++/models/pointnet2_cls.py
index 778433c17794ebfeb520f59655c2c4772ce23b0a..b0f4fb88796f7415084610dd2999e93a9a55fce7 100644
--- a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_cls.py
+++ b/PaddleCV/Paddle3D/PointNet++/models/pointnet2_cls.py
@@ -35,16 +35,22 @@ class PointNet2Cls(object):
         self.num_points = num_points
         self.use_xyz = use_xyz
         self.out_feature = None
-        self.pyreader = None
+        self.loader = None
         self.model_config()

     def model_config(self):
         self.SA_confs = []

     def build_input(self):
-        self.xyz = fluid.layers.data(name='xyz', shape=[self.num_points, 3], dtype='float32', lod_level=0)
-        self.label = fluid.layers.data(name='label', shape=[1], dtype='int64', lod_level=0)
-        self.pyreader = fluid.io.PyReader(
+        self.xyz = fluid.data(name='xyz',
+                              shape=[None, self.num_points, 3],
+                              dtype='float32',
+                              lod_level=0)
+        self.label = fluid.data(name='label',
+                                shape=[None, 1],
+                                dtype='int64',
+                                lod_level=0)
+        self.loader = fluid.io.DataLoader.from_generator(
             feed_list=[self.xyz, self.label],
             capacity=64,
             use_double_buffer=True,
@@ -65,11 +71,11 @@
                 **SA_conf)

         out = fluid.layers.squeeze(feature, axes=[-1])
-        out = fc_bn(out,out_channels=512, bn=True, bn_momentum=bn_momentum, name="fc_1")
+        out = fc_bn(out, out_channels=512, bn=True, bn_momentum=bn_momentum, name="fc_1")
         out = fluid.layers.dropout(out, 0.5, dropout_implementation="upscale_in_train")
-        out = fc_bn(out,out_channels=256, bn=True, bn_momentum=bn_momentum, name="fc_2")
+        out = fc_bn(out, out_channels=256, bn=True, bn_momentum=bn_momentum, name="fc_2")
         out = fluid.layers.dropout(out, 0.5, dropout_implementation="upscale_in_train")
-        out = fc_bn(out,out_channels=self.num_classes, act=None, name="fc_3")
+        out = fc_bn(out, out_channels=self.num_classes, act=None, name="fc_3")
         pred = fluid.layers.softmax(out)

         # calc loss
@@ -87,8 +93,8 @@
     def get_outputs(self):
         return {"loss": self.loss, "accuracy": self.acc1}

-    def get_pyreader(self):
-        return self.pyreader
+    def get_loader(self):
+        return self.loader


 class PointNet2ClsSSG(PointNet2Cls):
diff --git a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_seg.py b/PaddleCV/Paddle3D/PointNet++/models/pointnet2_seg.py
index 04d6d73e2b6d066aa940ab176698af3738b4de94..491f3ac56f8e0eb96531cea5c45224dd6c3e8d63 100644
--- a/PaddleCV/Paddle3D/PointNet++/models/pointnet2_seg.py
+++ b/PaddleCV/Paddle3D/PointNet++/models/pointnet2_seg.py
@@ -36,7 +36,7 @@ class PointNet2SemSeg(object):
         self.use_xyz = use_xyz
         self.feed_vars = []
         self.out_feature = None
-        self.pyreader = None
+        self.loader = None
         self.model_config()

     def model_config(self):
@@ -44,10 +44,19 @@
         self.FP_confs = []

     def build_input(self):
-        self.xyz = fluid.layers.data(name='xyz', shape=[self.num_points, 3], dtype='float32', lod_level=0)
-        self.feature = fluid.layers.data(name='feature', shape=[self.num_points, 6], dtype='float32', lod_level=0)
-        self.label = fluid.layers.data(name='label', shape=[self.num_points, 1], dtype='int64', lod_level=0)
-        self.pyreader = fluid.io.PyReader(
+        self.xyz = fluid.data(name='xyz',
+                              shape=[None, self.num_points, 3],
+                              dtype='float32',
+                              lod_level=0)
+        self.feature = fluid.data(name='feature',
+                                  shape=[None, self.num_points, 6],
+                                  dtype='float32',
+                                  lod_level=0)
+        self.label = fluid.data(name='label',
+                                shape=[None, self.num_points, 1],
+                                dtype='int64',
+                                lod_level=0)
+        self.loader = fluid.io.DataLoader.from_generator(
             feed_list=[self.xyz, self.feature, self.label],
             capacity=64,
             use_double_buffer=True,
@@ -103,8 +112,8 @@
     def get_outputs(self):
         return {"loss": self.loss, "accuracy": self.acc1}

-    def get_pyreader(self):
-        return self.pyreader
+    def get_loader(self):
+        return self.loader


 class PointNet2SemSegSSG(PointNet2SemSeg):
diff --git a/PaddleCV/Paddle3D/PointNet++/train_cls.py b/PaddleCV/Paddle3D/PointNet++/train_cls.py
index f9b49b9dceacc48848a9fa9c3570e2fbf8d79a76..eb58f6826f6cf076e56d76cd771b7d8fb2dee943 100644
--- a/PaddleCV/Paddle3D/PointNet++/train_cls.py
+++ b/PaddleCV/Paddle3D/PointNet++/train_cls.py
@@ -54,7 +54,7 @@ def parse_args():
     parser.add_argument(
         '--num_points',
         type=int,
-        default=4096,
-        help='number of points in a sample, default: 4096')
+        default=2048,
+        help='number of points in a sample, default: 2048')
     parser.add_argument(
         '--num_classes',
@@ -148,7 +148,7 @@ def train():
                 PointNet2ClsSSG(args.num_classes, args.num_points)
             train_model.build_model(bn_momentum=args.bn_momentum)
             train_feeds = train_model.get_feeds()
-            train_pyreader = train_model.get_pyreader()
+            train_loader = train_model.get_loader()
             train_outputs = train_model.get_outputs()
             train_loss = train_outputs['loss']
             lr = fluid.layers.exponential_decay(
@@ -157,9 +157,13 @@
                     decay_rate=args.lr_decay,
                     staircase=True)
             lr = fluid.layers.clip(lr, 1e-5, args.lr)
+            params = []
+            for var in train_prog.list_vars():
+                if fluid.io.is_parameter(var):
+                    params.append(var.name)
             optimizer = fluid.optimizer.Adam(learning_rate=lr,
                     regularization=fluid.regularizer.L2Decay(args.weight_decay))
-            optimizer.minimize(train_loss)
+            optimizer.minimize(train_loss, parameter_list=params)
     train_keys, train_values = parse_outputs(train_outputs)

     test_prog = fluid.Program()
@@ -171,7 +175,7 @@
             test_model.build_model()
             test_feeds = test_model.get_feeds()
             test_outputs = test_model.get_outputs()
-            test_pyreader = test_model.get_pyreader()
+            test_loader = test_model.get_loader()
     test_prog = test_prog.clone(True)

     test_keys, test_values = parse_outputs(test_outputs)
@@ -180,12 +184,13 @@
     exe.run(startup)

     if args.resume:
-        assert os.path.exists(args.resume), \
-                "Given resume weight dir {} not exist.".format(args.resume)
-        def if_exist(var):
-            return os.path.exists(os.path.join(args.resume, var.name))
-        fluid.io.load_vars(
-            exe, args.resume, predicate=if_exist, main_program=train_prog)
+        assert os.path.exists("{}.pdparams".format(args.resume)), \
+                "Given resume weight {}.pdparams not exist.".format(args.resume)
+        assert os.path.exists("{}.pdopt".format(args.resume)), \
+                "Given resume optimizer state {}.pdopt not exist.".format(args.resume)
+        assert os.path.exists("{}.pdmodel".format(args.resume)), \
+                "Given resume model parameter list {}.pdmodel not exist.".format(args.resume)
+        fluid.load(train_prog, args.resume, exe)

     build_strategy = fluid.BuildStrategy()
     build_strategy.memory_optimize = False
@@ -200,7 +205,7 @@ def train():
         if os.path.isdir(path):
             shutil.rmtree(path)
         logger.info("Save model to {}".format(path))
-        fluid.io.save_persistables(exe, path, prog)
+        fluid.save(prog, path)

     # get reader
     trans_list = [
@@ -213,10 +218,10 @@
     ]
     modelnet_reader = ModelNet40ClsReader(args.data_dir, mode='train', transforms=trans_list)
     train_reader = modelnet_reader.get_reader(args.batch_size, args.num_points)
-    train_pyreader.decorate_sample_list_generator(train_reader, place)
+    train_loader.set_sample_list_generator(train_reader, place)
     modelnet_reader = ModelNet40ClsReader(args.data_dir, mode='test', transforms=None)
     test_reader = modelnet_reader.get_reader(args.batch_size, args.num_points)
-    test_pyreader.decorate_sample_list_generator(test_reader, place)
+    test_loader.set_sample_list_generator(test_reader, place)

     train_stat = Stat()
     test_stat = Stat()
@@ -226,7 +231,7 @@

     for epoch_id in range(args.epoch):
         try:
-            train_pyreader.start()
+            train_loader.start()
             train_iter = 0
             train_periods = []
             while True:
@@ -251,7 +256,7 @@
             # evaluation
             if not args.enable_ce:
                 try:
-                    test_pyreader.start()
+                    test_loader.start()
                     test_iter = 0
                     test_periods = []
                     while True:
@@ -269,12 +274,12 @@
                 except fluid.core.EOFException:
                     logger.info("[TEST] Epoch {} finished, {}average time: {:.2f}".format(epoch_id, test_stat.get_mean_log(), np.mean(test_periods[1:])))
                 finally:
-                    test_pyreader.reset()
+                    test_loader.reset()
                     test_stat.reset()
                     test_periods = []
         finally:
-            train_pyreader.reset()
+            train_loader.reset()
             train_stat.reset()
             train_periods = []

diff --git a/PaddleCV/Paddle3D/PointNet++/train_seg.py b/PaddleCV/Paddle3D/PointNet++/train_seg.py
index 11eaabc7b298f70c9b64faa630541ce8d1d89ec6..4bdd73af5bfc4ab97cd5c5405e1fb0c80e44f961 100644
--- a/PaddleCV/Paddle3D/PointNet++/train_seg.py
+++ b/PaddleCV/Paddle3D/PointNet++/train_seg.py
@@ -147,7 +147,7 @@ def train():
                 PointNet2SemSegSSG(args.num_classes, args.num_points)
             train_model.build_model(bn_momentum=args.bn_momentum)
             train_feeds = train_model.get_feeds()
-            train_pyreader = train_model.get_pyreader()
+            train_loader = train_model.get_loader()
             train_outputs = train_model.get_outputs()
             train_loss = train_outputs['loss']
             lr = fluid.layers.exponential_decay(
@@ -156,9 +156,13 @@
                     decay_rate=args.lr_decay,
                     staircase=True)
             lr = fluid.layers.clip(lr, 1e-5, args.lr)
+            params = []
+            for var in train_prog.list_vars():
+                if fluid.io.is_parameter(var):
+                    params.append(var.name)
             optimizer = fluid.optimizer.Adam(learning_rate=lr,
                     regularization=fluid.regularizer.L2Decay(args.weight_decay))
-            optimizer.minimize(train_loss)
+            optimizer.minimize(train_loss, parameter_list=params)
     train_keys, train_values = parse_outputs(train_outputs)

     test_prog = fluid.Program()
@@ -170,7 +174,7 @@
             test_model.build_model()
             test_feeds = test_model.get_feeds()
             test_outputs = test_model.get_outputs()
-            test_pyreader = test_model.get_pyreader()
+            test_loader = test_model.get_loader()
     test_prog = test_prog.clone(True)

     test_keys, test_values = parse_outputs(test_outputs)
@@ -179,12 +183,13 @@
     exe.run(startup)

     if args.resume:
-        assert os.path.exists(args.resume), \
-                "Given resume weight dir {} not exist.".format(args.resume)
-        def if_exist(var):
-            return os.path.exists(os.path.join(args.resume, var.name))
-        fluid.io.load_vars(
-            exe, args.resume, predicate=if_exist, main_program=train_prog)
+        assert os.path.exists("{}.pdparams".format(args.resume)), \
+                "Given resume weight {}.pdparams not exist.".format(args.resume)
+        assert os.path.exists("{}.pdopt".format(args.resume)), \
+                "Given resume optimizer state {}.pdopt not exist.".format(args.resume)
+        assert os.path.exists("{}.pdmodel".format(args.resume)), \
+                "Given resume model parameter list {}.pdmodel not exist.".format(args.resume)
+        fluid.load(train_prog, args.resume, exe)

     build_strategy = fluid.BuildStrategy()
     build_strategy.memory_optimize = False
@@ -199,14 +204,14 @@ def train():
         if os.path.isdir(path):
             shutil.rmtree(path)
         logger.info("Save model to {}".format(path))
-        fluid.io.save_persistables(exe, path, prog)
+        fluid.save(prog, path)

     # get reader
     indoor_reader = Indoor3DReader(args.data_dir)
     train_reader = indoor_reader.get_reader(args.batch_size, args.num_points, mode='train')
     test_reader = indoor_reader.get_reader(args.batch_size, args.num_points, mode='test')
-    train_pyreader.decorate_sample_list_generator(train_reader, place)
-    test_pyreader.decorate_sample_list_generator(test_reader, place)
+    train_loader.set_sample_list_generator(train_reader, place)
+    test_loader.set_sample_list_generator(test_reader, place)

     train_stat = Stat()
     test_stat = Stat()
@@ -216,7 +221,7 @@

     for epoch_id in range(args.epoch):
         try:
-            train_pyreader.start()
+            train_loader.start()
             train_iter = 0
             train_periods = []
             while True:
@@ -236,12 +241,12 @@
         except fluid.core.EOFException:
             logger.info("[TRAIN] Epoch {} finished, {}average time: {:.2f}".format(epoch_id, train_stat.get_mean_log(), np.mean(train_periods[1:])))
             ce_time = np.mean(train_periods[1:])
-            save_model(exe, train_prog, os.path.join(args.save_dir, str(epoch_id)))
+            save_model(exe, train_prog, os.path.join(args.save_dir, str(epoch_id), "pointnet2_{}_seg".format(args.model)))

             # evaluation
             if not args.enable_ce:
                 try:
-                    test_pyreader.start()
+                    test_loader.start()
                     test_iter = 0
                     test_periods = []
                     while True:
@@ -259,12 +264,12 @@
                 except fluid.core.EOFException:
                     logger.info("[TEST] Epoch {} finished, {}average time: {:.2f}".format(epoch_id, test_stat.get_mean_log(), np.mean(test_periods[1:])))
                 finally:
-                    test_pyreader.reset()
+                    test_loader.reset()
                     test_stat.reset()
                     test_periods = []
         finally:
-            train_pyreader.reset()
+            train_loader.reset()
             train_stat.reset()
             train_periods = []

diff --git a/PaddleCV/Paddle3D/PointRCNN/README.md b/PaddleCV/Paddle3D/PointRCNN/README.md
index 5b2d82920bf702146589879291a2de9ececf1371..8c2e219c6ae28828e09fac9dce19e4856ee1412e 100644
--- a/PaddleCV/Paddle3D/PointRCNN/README.md
+++ b/PaddleCV/Paddle3D/PointRCNN/README.md
@@ -188,14 +188,14 @@ RPN训练checkpoints默认保存在`checkpoints/rpn`目录,也可以通过`--s
 python tools/generate_aug_scene.py --class_name 'Car' --split train --aug_times 4
 ```

-保存RPN模型对离线增强数据的输出特征和ROI,可以通过参数`--ckpt_dir`来指定RPN训练最终权重保存路径,RPN权重默认保存在`checkpoints/rpn`目录。
+保存RPN模型对离线增强数据的输出特征和ROI,可以通过参数`--weights`来指定RPN训练最终权重保存路径,RPN权重默认保存在`checkpoints/rpn`目录。
 保存输出特征和ROI时须指定`TEST.SPLIT`为`train_aug`,指定`TEST.RPN_POST_NMS_TOP_N`为`300`, `TEST.RPN_NMS_THRESH`为`0.85`。
 通过`--output_dir`指定保存输出特征和ROI的路径,默认保存到`./output`目录。

 ```
 python eval.py --cfg=cfgs/default.yml \
                --eval_mode=rpn \
-               --ckpt_dir=./checkpoints/rpn/199 \
+               --weights=./checkpoints/rpn/199 \
                --save_rpn_feature \
                --output_dir=output \
                --set TEST.SPLIT train_aug TEST.RPN_POST_NMS_TOP_N 300 TEST.RPN_NMS_THRESH 0.85
@@ -260,13 +260,13 @@ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:`python -c 'import paddle; print(paddle.

 2. 保存RPN模型对评估数据的输出特征和ROI

-保存RPN模型对评估数据的输出特征和ROI命令如下,可以通过参数`--ckpt_dir`来指定RPN训练最终权重保存路径,RPN权重默认保存在`checkpoints/rpn`目录。
+保存RPN模型对评估数据的输出特征和ROI命令如下,可以通过参数`--weights`来指定RPN训练最终权重保存路径,RPN权重默认保存在`checkpoints/rpn`目录。
 通过`--output_dir`指定保存输出特征和ROI的路径,默认保存到`./output`目录。

 ```
 python eval.py --cfg=cfgs/default.yml \
                --eval_mode=rpn \
-               --ckpt_dir=./checkpoints/rpn/199 \
+               --weights=./checkpoints/rpn/199 \
                --save_rpn_feature \
                --output_dir=output/val
 ```
@@ -280,7 +280,7 @@ python eval.py --cfg=cfgs/default.yml \
 ```
 python eval.py --cfg=cfgs/default.yml \
                --eval_mode=rcnn_offline \
-               --ckpt_dir=./checkpoints/rcnn_offline/29 \
+               --weights=./checkpoints/rcnn_offline/29 \
                --rcnn_eval_roi_dir=output/val/detections/data \
                --rcnn_eval_feature_dir=output/val/features \
                --save_result
diff --git a/PaddleCV/Paddle3D/PointRCNN/eval.py b/PaddleCV/Paddle3D/PointRCNN/eval.py
index 7ee5d37f40bbee8a5486090b1ebda05f0d5928a8..d3e18c3e4b9b36abebae93276a93ff8ae102a7f6 100644
--- a/PaddleCV/Paddle3D/PointRCNN/eval.py
+++ b/PaddleCV/Paddle3D/PointRCNN/eval.py
@@ -59,10 +59,10 @@ def parse_args():
         default=1,
         help='evaluation batch size, default 1')
     parser.add_argument(
-        '--ckpt_dir',
+        '--weights',
         type=str,
         default='checkpoints/199',
-        help='specify a ckpt directory to be evaluated if needed')
+        help='specify weights to be evaluated if needed')
     parser.add_argument(
         '--data_dir',
         type=str,
@@ -146,7 +146,7 @@ def eval():
         with fluid.unique_name.guard():
             eval_model = PointRCNN(cfg, args.batch_size, True, 'TEST')
             eval_model.build()
-            eval_pyreader = eval_model.get_pyreader()
+            eval_loader = eval_model.get_loader()
             eval_feeds = eval_model.get_feeds()
             eval_outputs = eval_model.get_outputs()
     eval_prog = eval_prog.clone(True)
@@ -164,13 +164,10 @@
     exe.run(startup)

-    # load checkpoint
-    assert os.path.isdir(
-        args.ckpt_dir), "ckpt_dir {} not a directory".format(args.ckpt_dir)
-
-    def if_exist(var):
-        return os.path.exists(os.path.join(args.ckpt_dir, var.name))
-    fluid.io.load_vars(exe, args.ckpt_dir, eval_prog, predicate=if_exist)
+    # load weights
+    assert os.path.exists("{}.pdparams".format(args.weights)), \
+        "Given resume weight {}.pdparams not exist.".format(args.weights)
+    fluid.load(eval_prog, args.weights)

     kitti_feature_dir = os.path.join(args.output_dir, 'features')
     kitti_output_dir = os.path.join(args.output_dir, 'detections', 'data')
@@ -207,7 +204,7 @@
         rcnn_eval_roi_dir=args.rcnn_eval_roi_dir,
         rcnn_eval_feature_dir=args.rcnn_eval_feature_dir)
     eval_reader = kitti_rcnn_reader.get_multiprocess_reader(args.batch_size, eval_feeds)
-    eval_pyreader.decorate_sample_list_generator(eval_reader, place)
+    eval_loader.set_sample_list_generator(eval_reader, place)

     thresh_list = [0.1, 0.3, 0.5, 0.7, 0.9]
     queue = multiprocessing.Queue(128)
@@ -249,7 +246,7 @@
     p_list[-1].start()

     try:
-        eval_pyreader.start()
+        eval_loader.start()
         eval_iter = 0
         start_time = time.time()
@@ -336,7 +333,7 @@
                     "run 'python3 tools/kitti_eval.py' to evaluate KITTI mAP.")
     finally:
-        eval_pyreader.reset()
+        eval_loader.reset()


 if __name__ == "__main__":
diff --git a/PaddleCV/Paddle3D/PointRCNN/models/point_rcnn.py b/PaddleCV/Paddle3D/PointRCNN/models/point_rcnn.py
index 890ef897405722f9cc1ba1d129bea2c80fce17a1..707de8c356e2a7478190d9699d87b12953307fff 100644
--- a/PaddleCV/Paddle3D/PointRCNN/models/point_rcnn.py
+++ b/PaddleCV/Paddle3D/PointRCNN/models/point_rcnn.py
@@ -39,47 +39,47 @@ class PointRCNN(object):
         self.num_points = self.cfg.RPN.NUM_POINTS
         self.prog = prog
         self.inputs = None
-        self.pyreader = None
+        self.loader = None

     def build_inputs(self):
         self.inputs = OrderedDict()

         if self.cfg.RPN.ENABLED:
-            self.inputs['sample_id'] = fluid.layers.data(name='sample_id', shape=[1], dtype='int32')
-            self.inputs['pts_input'] = fluid.layers.data(name='pts_input', shape=[self.num_points, 3], dtype='float32')
-            self.inputs['pts_rect'] = fluid.layers.data(name='pts_rect', shape=[self.num_points, 3], dtype='float32')
-            self.inputs['pts_features'] = fluid.layers.data(name='pts_features', shape=[self.num_points, 1], dtype='float32')
-            self.inputs['rpn_cls_label'] = fluid.layers.data(name='rpn_cls_label', shape=[self.num_points], dtype='int32')
-            self.inputs['rpn_reg_label'] = fluid.layers.data(name='rpn_reg_label', shape=[self.num_points, 7], dtype='float32')
-            self.inputs['gt_boxes3d'] = fluid.layers.data(name='gt_boxes3d', shape=[7], lod_level=1, dtype='float32')
+            self.inputs['sample_id'] = fluid.data(name='sample_id', shape=[None, 1], dtype='int32')
+            self.inputs['pts_input'] = fluid.data(name='pts_input', shape=[None, self.num_points, 3], dtype='float32')
+            self.inputs['pts_rect'] = fluid.data(name='pts_rect', shape=[None, self.num_points, 3], dtype='float32')
+            self.inputs['pts_features'] = fluid.data(name='pts_features', shape=[None, self.num_points, 1], dtype='float32')
+            self.inputs['rpn_cls_label'] = fluid.data(name='rpn_cls_label', shape=[None, self.num_points], dtype='int32')
+            self.inputs['rpn_reg_label'] = fluid.data(name='rpn_reg_label', shape=[None, self.num_points, 7], dtype='float32')
+            self.inputs['gt_boxes3d'] = fluid.data(name='gt_boxes3d', shape=[None, 7], lod_level=1, dtype='float32')

         if self.cfg.RCNN.ENABLED:
             if self.cfg.RCNN.ROI_SAMPLE_JIT:
-                self.inputs['sample_id'] = fluid.layers.data(name='sample_id', shape=[1], dtype='int32', append_batch_size=False)
-                self.inputs['rpn_xyz'] = fluid.layers.data(name='rpn_xyz', shape=[self.num_points, 3], dtype='float32', append_batch_size=False)
-                self.inputs['rpn_features'] = fluid.layers.data(name='rpn_features', shape=[self.num_points,128], dtype='float32', append_batch_size=False)
-                self.inputs['rpn_intensity'] = fluid.layers.data(name='rpn_intensity', shape=[self.num_points], dtype='float32', append_batch_size=False)
-                self.inputs['seg_mask'] = fluid.layers.data(name='seg_mask', shape=[self.num_points], dtype='float32', append_batch_size=False)
-                self.inputs['roi_boxes3d'] = fluid.layers.data(name='roi_boxes3d', shape=[-1, -1, 7], dtype='float32', append_batch_size=False, lod_level=0)
-                self.inputs['pts_depth'] = fluid.layers.data(name='pts_depth', shape=[self.num_points], dtype='float32', append_batch_size=False)
-                self.inputs['gt_boxes3d'] = fluid.layers.data(name='gt_boxes3d', shape=[-1, -1, 7], dtype='float32', append_batch_size=False, lod_level=0)
+                self.inputs['sample_id'] = fluid.data(name='sample_id', shape=[1], dtype='int32')
+                self.inputs['rpn_xyz'] = fluid.data(name='rpn_xyz', shape=[self.num_points, 3], dtype='float32')
+                self.inputs['rpn_features'] = fluid.data(name='rpn_features', shape=[self.num_points, 128], dtype='float32')
+                self.inputs['rpn_intensity'] = fluid.data(name='rpn_intensity', shape=[self.num_points], dtype='float32')
+                self.inputs['seg_mask'] = fluid.data(name='seg_mask', shape=[self.num_points], dtype='float32')
+                self.inputs['roi_boxes3d'] = fluid.data(name='roi_boxes3d', shape=[None, None, 7], dtype='float32', lod_level=0)
+                self.inputs['pts_depth'] = fluid.data(name='pts_depth', shape=[self.num_points], dtype='float32')
+                self.inputs['gt_boxes3d'] = fluid.data(name='gt_boxes3d', shape=[None, None, 7], dtype='float32', lod_level=0)
             else:
-                self.inputs['sample_id'] = fluid.layers.data(name='sample_id', shape=[-1], dtype='int32', append_batch_size=False)
-                self.inputs['pts_input'] = fluid.layers.data(name='pts_input', shape=[-1,512,133], dtype='float32', append_batch_size=False)
-                self.inputs['pts_feature'] = fluid.layers.data(name='pts_feature', shape=[-1,512,128], dtype='float32', append_batch_size=False)
-                self.inputs['roi_boxes3d'] = fluid.layers.data(name='roi_boxes3d', shape=[-1,7], dtype='float32', append_batch_size=False)
+                self.inputs['sample_id'] = fluid.data(name='sample_id', shape=[None], dtype='int32')
+                self.inputs['pts_input'] = fluid.data(name='pts_input', shape=[None, 512, 133], dtype='float32')
+                self.inputs['pts_feature'] = fluid.data(name='pts_feature', shape=[None, 512, 128], dtype='float32')
+                self.inputs['roi_boxes3d'] = fluid.data(name='roi_boxes3d', shape=[None,7], dtype='float32')
                 if self.is_train:
-                    self.inputs['cls_label'] = fluid.layers.data(name='cls_label', shape=[-1], dtype='float32', append_batch_size=False)
-                    self.inputs['reg_valid_mask'] = fluid.layers.data(name='reg_valid_mask', shape=[-1], dtype='float32', append_batch_size=False)
-                    self.inputs['gt_boxes3d_ct'] = fluid.layers.data(name='gt_boxes3d_ct', shape=[-1,7], dtype='float32', append_batch_size=False)
-                    self.inputs['gt_of_rois'] = fluid.layers.data(name='gt_of_rois', shape=[-1,7], dtype='float32', append_batch_size=False)
+                    self.inputs['cls_label'] = fluid.data(name='cls_label', shape=[None], dtype='float32')
+                    self.inputs['reg_valid_mask'] = fluid.data(name='reg_valid_mask', shape=[None], dtype='float32')
+                    self.inputs['gt_boxes3d_ct'] = fluid.data(name='gt_boxes3d_ct', shape=[None, 7], dtype='float32')
+                    self.inputs['gt_of_rois'] = fluid.data(name='gt_of_rois', shape=[None, 7], dtype='float32')
                 else:
-                    self.inputs['roi_scores'] = fluid.layers.data(name='roi_scores', shape=[-1,], dtype='float32', append_batch_size=False)
-                    self.inputs['gt_iou'] = fluid.layers.data(name='gt_iou', shape=[-1], dtype='float32', append_batch_size=False)
-                    self.inputs['gt_boxes3d'] = fluid.layers.data(name='gt_boxes3d', shape=[-1,-1,7], dtype='float32', append_batch_size=False, lod_level=0)
+                    self.inputs['roi_scores'] = fluid.data(name='roi_scores', shape=[None], dtype='float32')
+                    self.inputs['gt_iou'] = fluid.data(name='gt_iou', shape=[None], dtype='float32')
+                    self.inputs['gt_boxes3d'] = fluid.data(name='gt_boxes3d', shape=[None, None, 7], dtype='float32', lod_level=0)

-        self.pyreader = fluid.io.PyReader(
+        self.loader = fluid.io.DataLoader.from_generator(
             feed_list=list(self.inputs.values()),
             capacity=64,
             use_double_buffer=True,
@@ -120,6 +120,6 @@
         rcnn_loss, _, _ = self.rcnn.get_loss()
         return rpn_loss + rcnn_loss

-    def get_pyreader(self):
-        return self.pyreader
+    def get_loader(self):
+        return self.loader

diff --git a/PaddleCV/Paddle3D/PointRCNN/train.py b/PaddleCV/Paddle3D/PointRCNN/train.py
index 41a6f0981b5222b940eb23aca548fbf0672723ba..44e646773e830dee90bab17960f9adf62c4dc19b 100644
--- a/PaddleCV/Paddle3D/PointRCNN/train.py
+++ b/PaddleCV/Paddle3D/PointRCNN/train.py
@@ -22,7 +22,6 @@
 import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.layers import control_flow
-from paddle.fluid.contrib.extend_optimizer import extend_with_decoupled_weight_decay
 import paddle.fluid.layers.learning_rate_scheduler as lr_scheduler
 from models.point_rcnn import PointRCNN
@@ -169,7 +168,7 @@ def train():
         with fluid.unique_name.guard():
             train_model = PointRCNN(cfg, args.batch_size, True, 'TRAIN')
             train_model.build()
-            train_pyreader = train_model.get_pyreader()
+            train_loader = train_model.get_loader()
             train_feeds = train_model.get_feeds()
             train_outputs = train_model.get_outputs()
             train_loss = train_outputs['loss']
@@ -179,7 +178,7 @@
                 decay_factor=1e-5,
                 total_step=steps_per_epoch * args.epoch,
                 warmup_pct=cfg.TRAIN.PCT_START,
-                train_program=train_prog,
+                train_prog=train_prog,
                 startup_prog=startup,
                 weight_decay=cfg.TRAIN.WEIGHT_DECAY,
                 clip_norm=cfg.TRAIN.GRAD_NORM_CLIP)
@@ -188,13 +187,13 @@
     exe.run(startup)

     if args.resume:
-        assert os.path.exists(args.resume), \
-                "Given resume weight dir {} not exist.".format(args.resume)
-        def if_exist(var):
-            logger.debug("{}: {}".format(var.name, os.path.exists(os.path.join(args.resume, var.name))))
-            return os.path.exists(os.path.join(args.resume, var.name))
-        fluid.io.load_vars(
-            exe, args.resume, predicate=if_exist, main_program=train_prog)
+        assert os.path.exists("{}.pdparams".format(args.resume)), \
+                "Given resume weight {}.pdparams not exist.".format(args.resume)
+        assert os.path.exists("{}.pdopt".format(args.resume)), \
+                "Given resume optimizer state {}.pdopt not exist.".format(args.resume)
+        assert os.path.exists("{}.pdmodel".format(args.resume)), \
+                "Given resume model parameter list {}.pdmodel not exist.".format(args.resume)
+        fluid.load(train_prog, args.resume, exe)

     build_strategy = fluid.BuildStrategy()
     build_strategy.memory_optimize = False
@@ -208,19 +207,19 @@
         if os.path.isdir(path):
             shutil.rmtree(path)
         logger.info("Save model to {}".format(path))
-        fluid.io.save_persistables(exe, path, prog)
+        fluid.save(prog, path)

     # get reader
     train_reader = kitti_rcnn_reader.get_multiprocess_reader(args.batch_size, train_feeds, proc_num=args.worker_num, drop_last=True)
-    train_pyreader.decorate_sample_list_generator(train_reader, place)
+    train_loader.set_sample_list_generator(train_reader, place)

     train_stat = Stat()

     for epoch_id in range(args.resume_epoch, args.epoch):
         try:
-            train_pyreader.start()
+            train_loader.start()
             train_iter = 0
             train_periods = []
             while True:
@@ -241,7 +240,7 @@
             train_stat.reset()
             train_periods = []
         finally:
-            train_pyreader.reset()
+            train_loader.reset()


 if __name__ == "__main__":
diff --git a/PaddleCV/Paddle3D/PointRCNN/utils/optimizer.py b/PaddleCV/Paddle3D/PointRCNN/utils/optimizer.py
index e32d1df862de7692e520168a2b35f482535f3ac6..f19cdbc1b79f789bfa78c57babc21367785fddc6 100644
--- a/PaddleCV/Paddle3D/PointRCNN/utils/optimizer.py
+++ b/PaddleCV/Paddle3D/PointRCNN/utils/optimizer.py
@@ -79,7 +79,7 @@ def optimize(loss,
              decay_factor,
              total_step,
              warmup_pct,
-             train_program,
+             train_prog,
              startup_prog,
              weight_decay,
              clip_norm,
@@ -105,11 +105,15 @@

     param_list = dict()
     if weight_decay > 0:
-        for param in train_program.global_block().all_parameters():
+        for param in train_prog.all_parameters():
             param_list[param.name] = param * 1.0
             param_list[param.name].stop_gradient = True

-    _, param_grads = optimizer.minimize(loss)
+    opt_param_list = []
+    for var in train_prog.list_vars():
+        if fluid.io.is_parameter(var):
+            opt_param_list.append(var.name)
+    _, param_grads = optimizer.minimize(loss, parameter_list=opt_param_list)

     if weight_decay > 0:
         for param, grad in param_grads:
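
Note on the pattern applied throughout this patch: every file makes the same two moves, fluid.layers.data / fluid.io.PyReader become fluid.data / fluid.io.DataLoader.from_generator, and fluid.io.save_persistables / load_vars become fluid.save / fluid.load. The following is a minimal, self-contained sketch of that pattern in isolation, for reviewers unfamiliar with the new APIs. It is illustrative only and not part of the patch: the toy network, the random sample_list_reader, and the ./toy_checkpoint path are made up for the example, which assumes a Fluid-era Paddle (1.6+) install.

import numpy as np
import paddle.fluid as fluid

# Feed variables declare a variable batch dimension with None (replaces fluid.layers.data).
xyz = fluid.data(name='xyz', shape=[None, 2048, 3], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')

# DataLoader.from_generator replaces fluid.io.PyReader; iterable=False keeps the
# start()/EOFException/reset() control flow used by the training and eval scripts.
loader = fluid.io.DataLoader.from_generator(
    feed_list=[xyz, label], capacity=64, use_double_buffer=True, iterable=False)

# Tiny stand-in network so the sketch runs end to end.
feat = fluid.layers.reduce_mean(xyz, dim=1)
pred = fluid.layers.fc(feat, size=40, act='softmax')
loss = fluid.layers.reduce_mean(fluid.layers.cross_entropy(pred, label))
fluid.optimizer.Adam(learning_rate=1e-3).minimize(loss)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

def sample_list_reader():
    # Each yield is one batch: a list of (xyz, label) samples matching feed_list,
    # which is the shape of data the repo readers also produce.
    for _ in range(4):
        yield [(np.random.random((2048, 3)).astype('float32'),
                np.random.randint(0, 40, (1,)).astype('int64'))
               for _ in range(8)]

loader.set_sample_list_generator(sample_list_reader, place)

try:
    loader.start()
    while True:
        loss_v, = exe.run(fluid.default_main_program(), fetch_list=[loss.name])
except fluid.core.EOFException:
    pass  # one pass over the data finished
finally:
    loader.reset()

# fluid.save writes <path>.pdparams/.pdopt/.pdmodel and fluid.load restores them;
# these are the files the new resume asserts in the training scripts check for.
fluid.save(fluid.default_main_program(), "./toy_checkpoint")
fluid.load(fluid.default_main_program(), "./toy_checkpoint", exe)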