From 411664bd5135163a1a74f8e0523f3e6eb59fdd23 Mon Sep 17 00:00:00 2001 From: LielinJiang Date: Thu, 14 May 2020 07:50:52 +0000 Subject: [PATCH] adapt import --- examples/bert/bert_classifier.py | 17 ++--- examples/bert_leveldb/bert_classifier.py | 17 ++--- examples/bmn/bmn_metric.py | 2 +- examples/bmn/eval.py | 2 +- examples/bmn/modeling.py | 6 +- examples/bmn/predict.py | 2 +- examples/bmn/reader.py | 2 +- examples/bmn/train.py | 2 +- examples/cyclegan/cyclegan.py | 4 +- examples/cyclegan/infer.py | 2 +- examples/cyclegan/test.py | 2 +- examples/cyclegan/train.py | 14 ++-- .../handwritten_number_recognition/mnist.py | 10 +-- .../image_classification/imagenet_dataset.py | 4 +- examples/image_classification/main.py | 10 +-- examples/ocr/eval.py | 4 +- examples/ocr/predict.py | 6 +- examples/ocr/seq2seq_attn.py | 6 +- examples/ocr/train.py | 4 +- examples/ocr/utility.py | 4 +- examples/sentiment_classification/models.py | 66 ++++++++++------- .../sentiment_classifier.py | 72 +++++++++---------- examples/seq2seq/predict.py | 2 +- examples/seq2seq/seq2seq_attn.py | 4 +- examples/seq2seq/seq2seq_base.py | 4 +- examples/seq2seq/train.py | 2 +- examples/seq2seq/utility.py | 4 +- examples/sequence_tagging/eval.py | 13 ++-- examples/sequence_tagging/predict.py | 10 +-- examples/sequence_tagging/train.py | 10 +-- examples/style-transfer/README.md | 6 +- examples/style-transfer/style_transfer.py | 6 +- examples/transformer/predict.py | 2 +- examples/transformer/train.py | 4 +- examples/transformer/transformer.py | 4 +- examples/tsm/infer.py | 38 +++++----- examples/tsm/main.py | 8 +-- examples/tsm/modeling.py | 4 +- examples/yolov3/infer.py | 48 +++++++++---- examples/yolov3/main.py | 6 +- examples/yolov3/modeling.py | 2 +- 41 files changed, 234 insertions(+), 201 deletions(-) diff --git a/examples/bert/bert_classifier.py b/examples/bert/bert_classifier.py index e3c9726..3d47ecb 100644 --- a/examples/bert/bert_classifier.py +++ b/examples/bert/bert_classifier.py @@ -14,14 +14,14 @@ """BERT fine-tuning in Paddle Dygraph Mode.""" import paddle.fluid as fluid -from hapi.metrics import Accuracy -from hapi.configure import Config -from hapi.text.bert import BertEncoder +from paddle.incubate.hapi.metrics import Accuracy +from paddle.incubate.hapi.configure import Config +from paddle.incubate.hapi.text.bert import BertEncoder from paddle.fluid.dygraph import Linear, Layer -from hapi.loss import SoftmaxWithCrossEntropy -from hapi.model import set_device, Model, Input -import hapi.text.tokenizer.tokenization as tokenization -from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer +from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy +from paddle.incubate.hapi.model import set_device, Model, Input +import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization +from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer class ClsModelLayer(Model): @@ -157,7 +157,8 @@ def main(): labels, device=device) - cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True) + cls_model.bert_layer.load( + "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True) # do train cls_model.fit(train_data=train_dataloader.dataloader, diff --git a/examples/bert_leveldb/bert_classifier.py b/examples/bert_leveldb/bert_classifier.py index 51b1192..02d112f 100644 --- a/examples/bert_leveldb/bert_classifier.py +++ b/examples/bert_leveldb/bert_classifier.py @@ -14,14 +14,14 @@ """BERT fine-tuning in Paddle Dygraph 
Mode.""" import paddle.fluid as fluid -from hapi.metrics import Accuracy -from hapi.configure import Config -from hapi.text.bert import BertEncoder +from paddle.incubate.hapi.metrics import Accuracy +from paddle.incubate.hapi.configure import Config +from paddle.incubate.hapi.text.bert import BertEncoder from paddle.fluid.dygraph import Linear, Layer -from hapi.loss import SoftmaxWithCrossEntropy -from hapi.model import set_device, Model, Input -import hapi.text.tokenizer.tokenization as tokenization -from hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer +from paddle.incubate.hapi.loss import SoftmaxWithCrossEntropy +from paddle.incubate.hapi.model import set_device, Model, Input +import paddle.incubate.hapi.text.tokenizer.tokenization as tokenization +from paddle.incubate.hapi.text.bert import BertConfig, BertDataLoader, BertInputExample, make_optimizer class ClsModelLayer(Model): @@ -159,7 +159,8 @@ def main(): labels, device=device) - cls_model.bert_layer.load("./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True) + cls_model.bert_layer.load( + "./bert_uncased_L-12_H-768_A-12/bert", reset_optimizer=True) # do train cls_model.fit(train_data=train_dataloader.dataloader, diff --git a/examples/bmn/bmn_metric.py b/examples/bmn/bmn_metric.py index cbcad9a..e4ba8ae 100644 --- a/examples/bmn/bmn_metric.py +++ b/examples/bmn/bmn_metric.py @@ -20,7 +20,7 @@ import json sys.path.append('../') -from hapi.metrics import Metric +from paddle.incubate.hapi.metrics import Metric from bmn_utils import boundary_choose, bmn_post_processing diff --git a/examples/bmn/eval.py b/examples/bmn/eval.py index c8d4f26..3fd8aea 100644 --- a/examples/bmn/eval.py +++ b/examples/bmn/eval.py @@ -18,7 +18,7 @@ import sys import logging import paddle.fluid as fluid -from hapi.model import set_device, Input +from paddle.incubate.hapi.model import set_device, Input from modeling import bmn, BmnLoss from bmn_metric import BmnMetric diff --git a/examples/bmn/modeling.py b/examples/bmn/modeling.py index bfd65b3..5fac438 100644 --- a/examples/bmn/modeling.py +++ b/examples/bmn/modeling.py @@ -17,9 +17,9 @@ from paddle.fluid import ParamAttr import numpy as np import math -from hapi.model import Model -from hapi.loss import Loss -from hapi.download import get_weights_path_from_url +from paddle.incubate.hapi.model import Model +from paddle.incubate.hapi.loss import Loss +from paddle.incubate.hapi.download import get_weights_path_from_url __all__ = ["BMN", "BmnLoss", "bmn"] diff --git a/examples/bmn/predict.py b/examples/bmn/predict.py index b984c06..e96e837 100644 --- a/examples/bmn/predict.py +++ b/examples/bmn/predict.py @@ -18,7 +18,7 @@ import os import logging import paddle.fluid as fluid -from hapi.model import set_device, Input +from paddle.incubate.hapi.model import set_device, Input from modeling import bmn, BmnLoss from bmn_metric import BmnMetric diff --git a/examples/bmn/reader.py b/examples/bmn/reader.py index d8f125e..36780b2 100644 --- a/examples/bmn/reader.py +++ b/examples/bmn/reader.py @@ -21,7 +21,7 @@ import sys sys.path.append('../') -from hapi.distributed import DistributedBatchSampler +from paddle.incubate.hapi.distributed import DistributedBatchSampler from paddle.io import Dataset, DataLoader logger = logging.getLogger(__name__) diff --git a/examples/bmn/train.py b/examples/bmn/train.py index 1e2bf44..2a25c1a 100644 --- a/examples/bmn/train.py +++ b/examples/bmn/train.py @@ -18,7 +18,7 @@ import logging import sys import os -from hapi.model import set_device, Input 
+from paddle.incubate.hapi.model import set_device, Input from reader import BmnDataset from config_utils import * diff --git a/examples/cyclegan/cyclegan.py b/examples/cyclegan/cyclegan.py index 076e13d..f6a2dae 100644 --- a/examples/cyclegan/cyclegan.py +++ b/examples/cyclegan/cyclegan.py @@ -19,8 +19,8 @@ from __future__ import print_function import numpy as np import paddle.fluid as fluid -from hapi.model import Model -from hapi.loss import Loss +from paddle.incubate.hapi.model import Model +from paddle.incubate.hapi.loss import Loss from layers import ConvBN, DeConvBN diff --git a/examples/cyclegan/infer.py b/examples/cyclegan/infer.py index 2fb2b35..bbefaf6 100644 --- a/examples/cyclegan/infer.py +++ b/examples/cyclegan/infer.py @@ -25,7 +25,7 @@ from PIL import Image from scipy.misc import imsave import paddle.fluid as fluid -from hapi.model import Model, Input, set_device +from paddle.incubate.hapi.model import Model, Input, set_device from check import check_gpu, check_version from cyclegan import Generator, GeneratorCombine diff --git a/examples/cyclegan/test.py b/examples/cyclegan/test.py index 67f7183..ba7d5c5 100644 --- a/examples/cyclegan/test.py +++ b/examples/cyclegan/test.py @@ -22,7 +22,7 @@ import numpy as np from scipy.misc import imsave import paddle.fluid as fluid -from hapi.model import Model, Input, set_device +from paddle.incubate.hapi.model import Model, Input, set_device from check import check_gpu, check_version from cyclegan import Generator, GeneratorCombine diff --git a/examples/cyclegan/train.py b/examples/cyclegan/train.py index 4ca77dd..de9ed63 100644 --- a/examples/cyclegan/train.py +++ b/examples/cyclegan/train.py @@ -24,7 +24,7 @@ import time import paddle import paddle.fluid as fluid -from hapi.model import Model, Input, set_device +from paddle.incubate.hapi.model import Model, Input, set_device from check import check_gpu, check_version from cyclegan import Generator, Discriminator, GeneratorCombine, GLoss, DLoss @@ -78,12 +78,12 @@ def main(): g_AB.prepare(inputs=[input_A], device=FLAGS.device) g_BA.prepare(inputs=[input_B], device=FLAGS.device) - g.prepare(g_optimizer, GLoss(), inputs=[input_A, input_B], - device=FLAGS.device) - d_A.prepare(da_optimizer, DLoss(), inputs=[input_B, fake_B], - device=FLAGS.device) - d_B.prepare(db_optimizer, DLoss(), inputs=[input_A, fake_A], - device=FLAGS.device) + g.prepare( + g_optimizer, GLoss(), inputs=[input_A, input_B], device=FLAGS.device) + d_A.prepare( + da_optimizer, DLoss(), inputs=[input_B, fake_B], device=FLAGS.device) + d_B.prepare( + db_optimizer, DLoss(), inputs=[input_A, fake_A], device=FLAGS.device) if FLAGS.resume: g.load(FLAGS.resume) diff --git a/examples/handwritten_number_recognition/mnist.py b/examples/handwritten_number_recognition/mnist.py index 36db96c..a3b77da 100644 --- a/examples/handwritten_number_recognition/mnist.py +++ b/examples/handwritten_number_recognition/mnist.py @@ -19,12 +19,12 @@ import argparse from paddle import fluid from paddle.fluid.optimizer import Momentum -from hapi.datasets.mnist import MNIST as MnistDataset +from paddle.incubate.hapi.datasets.mnist import MNIST as MnistDataset -from hapi.model import Input, set_device -from hapi.loss import CrossEntropy -from hapi.metrics import Accuracy -from hapi.vision.models import LeNet +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.loss import CrossEntropy +from paddle.incubate.hapi.metrics import Accuracy +from paddle.incubate.hapi.vision.models import LeNet def main(): diff --git 
a/examples/image_classification/imagenet_dataset.py b/examples/image_classification/imagenet_dataset.py index 27c41d6..27a9009 100644 --- a/examples/image_classification/imagenet_dataset.py +++ b/examples/image_classification/imagenet_dataset.py @@ -18,8 +18,8 @@ import math import random import numpy as np -from hapi.datasets import DatasetFolder -from hapi.vision.transforms import transforms +from paddle.incubate.hapi.datasets import DatasetFolder +from paddle.incubate.hapi.vision.transforms import transforms from paddle import fluid diff --git a/examples/image_classification/main.py b/examples/image_classification/main.py index e5aea41..ff0c95b 100644 --- a/examples/image_classification/main.py +++ b/examples/image_classification/main.py @@ -27,11 +27,11 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.parallel import ParallelEnv from paddle.io import BatchSampler, DataLoader -from hapi.model import Input, set_device -from hapi.loss import CrossEntropy -from hapi.distributed import DistributedBatchSampler -from hapi.metrics import Accuracy -import hapi.vision.models as models +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.loss import CrossEntropy +from paddle.incubate.hapi.distributed import DistributedBatchSampler +from paddle.incubate.hapi.metrics import Accuracy +import paddle.incubate.hapi.vision.models as models from imagenet_dataset import ImageNetDataset diff --git a/examples/ocr/eval.py b/examples/ocr/eval.py index 1adffa5..6b5fd48 100644 --- a/examples/ocr/eval.py +++ b/examples/ocr/eval.py @@ -19,8 +19,8 @@ import functools import paddle.fluid.profiler as profiler import paddle.fluid as fluid -from hapi.model import Input, set_device -from hapi.vision.transforms import BatchCompose +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.vision.transforms import BatchCompose from utility import add_arguments, print_arguments from utility import SeqAccuracy, LoggerCallBack, SeqBeamAccuracy diff --git a/examples/ocr/predict.py b/examples/ocr/predict.py index 242d4f8..f1eac38 100644 --- a/examples/ocr/predict.py +++ b/examples/ocr/predict.py @@ -25,9 +25,9 @@ from PIL import Image import paddle.fluid.profiler as profiler import paddle.fluid as fluid -from hapi.model import Input, set_device -from hapi.datasets.folder import ImageFolder -from hapi.vision.transforms import BatchCompose +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.datasets.folder import ImageFolder +from paddle.incubate.hapi.vision.transforms import BatchCompose from utility import add_arguments, print_arguments from utility import postprocess, index2word diff --git a/examples/ocr/seq2seq_attn.py b/examples/ocr/seq2seq_attn.py index 66da91c..117749c 100644 --- a/examples/ocr/seq2seq_attn.py +++ b/examples/ocr/seq2seq_attn.py @@ -19,9 +19,9 @@ import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.layers import BeamSearchDecoder -from hapi.text import RNNCell, RNN, DynamicDecode -from hapi.model import Model -from hapi.loss import Loss +from paddle.incubate.hapi.text import RNNCell, RNN, DynamicDecode +from paddle.incubate.hapi.model import Model +from paddle.incubate.hapi.loss import Loss class ConvBNPool(fluid.dygraph.Layer): diff --git a/examples/ocr/train.py b/examples/ocr/train.py index d72173d..b69c90e 100644 --- a/examples/ocr/train.py +++ b/examples/ocr/train.py @@ -24,8 +24,8 @@ import functools import paddle.fluid.profiler as profiler import paddle.fluid as fluid 
-from hapi.model import Input, set_device -from hapi.vision.transforms import BatchCompose +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.vision.transforms import BatchCompose from utility import add_arguments, print_arguments from utility import SeqAccuracy, LoggerCallBack diff --git a/examples/ocr/utility.py b/examples/ocr/utility.py index d47b3f1..59b7f42 100644 --- a/examples/ocr/utility.py +++ b/examples/ocr/utility.py @@ -21,8 +21,8 @@ import numpy as np import paddle.fluid as fluid import six -from hapi.metrics import Metric -from hapi.callbacks import ProgBarLogger +from paddle.incubate.hapi.metrics import Metric +from paddle.incubate.hapi.callbacks import ProgBarLogger def print_arguments(args): diff --git a/examples/sentiment_classification/models.py b/examples/sentiment_classification/models.py index 313b928..7efaff5 100644 --- a/examples/sentiment_classification/models.py +++ b/examples/sentiment_classification/models.py @@ -15,13 +15,13 @@ import paddle.fluid as fluid from paddle.fluid.dygraph.nn import Linear, Embedding from paddle.fluid.dygraph.base import to_variable import numpy as np -from hapi.model import Model -from hapi.text.text import GRUEncoderLayer as BiGRUEncoder -from hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder +from paddle.incubate.hapi.model import Model +from paddle.incubate.hapi.text.text import GRUEncoderLayer as BiGRUEncoder +from paddle.incubate.hapi.text.test import BOWEncoder, CNNEncoder, GRUEncoder class CNN(Model): - def __init__(self, dict_dim, batch_size, seq_len): + def __init__(self, dict_dim, batch_size, seq_len): super(CNN, self).__init__() self.dict_dim = dict_dim self.emb_dim = 128 @@ -36,15 +36,19 @@ class CNN(Model): dict_size=self.dict_dim + 1, emb_dim=self.emb_dim, seq_len=self.seq_len, - filter_size= self.win_size, - num_filters= self.hid_dim, - hidden_dim= self.hid_dim, + filter_size=self.win_size, + num_filters=self.hid_dim, + hidden_dim=self.hid_dim, padding_idx=None, act='tanh') - self._fc1 = Linear(input_dim = self.hid_dim*self.seq_len, output_dim=self.fc_hid_dim, act="softmax") - self._fc_prediction = Linear(input_dim = self.fc_hid_dim, - output_dim = self.class_dim, - act="softmax") + self._fc1 = Linear( + input_dim=self.hid_dim * self.seq_len, + output_dim=self.fc_hid_dim, + act="softmax") + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, + output_dim=self.class_dim, + act="softmax") def forward(self, inputs): conv_3 = self._encoder(inputs) @@ -69,11 +73,14 @@ class BOW(Model): padding_idx=None, bow_dim=self.hid_dim, seq_len=self.seq_len) - self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim, act="tanh") - self._fc2 = Linear(input_dim = self.hid_dim, output_dim=self.fc_hid_dim, act="tanh") - self._fc_prediction = Linear(input_dim = self.fc_hid_dim, - output_dim = self.class_dim, - act="softmax") + self._fc1 = Linear( + input_dim=self.hid_dim, output_dim=self.hid_dim, act="tanh") + self._fc2 = Linear( + input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh") + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, + output_dim=self.class_dim, + act="softmax") def forward(self, inputs): bow_1 = self._encoder(inputs) @@ -94,10 +101,12 @@ class GRU(Model): self.class_dim = 2 self.batch_size = batch_size self.seq_len = seq_len - self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh") - self._fc_prediction = Linear(input_dim=self.fc_hid_dim, - output_dim=self.class_dim, - act="softmax") + self._fc1 = Linear( + 
input_dim=self.hid_dim, output_dim=self.fc_hid_dim, act="tanh") + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, + output_dim=self.class_dim, + act="softmax") self._encoder = GRUEncoder( dict_size=self.dict_dim + 1, emb_dim=self.emb_dim, @@ -112,7 +121,7 @@ class GRU(Model): prediction = self._fc_prediction(fc_1) return prediction - + class BiGRU(Model): def __init__(self, dict_dim, batch_size, seq_len): super(BiGRU, self).__init__() @@ -130,11 +139,13 @@ class BiGRU(Model): is_sparse=False) h_0 = np.zeros((self.batch_size, self.hid_dim), dtype="float32") h_0 = to_variable(h_0) - self._fc1 = Linear(input_dim = self.hid_dim, output_dim=self.hid_dim*3) - self._fc2 = Linear(input_dim = self.hid_dim*2, output_dim=self.fc_hid_dim, act="tanh") - self._fc_prediction = Linear(input_dim=self.fc_hid_dim, - output_dim=self.class_dim, - act="softmax") + self._fc1 = Linear(input_dim=self.hid_dim, output_dim=self.hid_dim * 3) + self._fc2 = Linear( + input_dim=self.hid_dim * 2, output_dim=self.fc_hid_dim, act="tanh") + self._fc_prediction = Linear( + input_dim=self.fc_hid_dim, + output_dim=self.class_dim, + act="softmax") self._encoder = BiGRUEncoder( grnn_hidden_dim=self.hid_dim, input_dim=self.hid_dim * 3, @@ -144,7 +155,8 @@ class BiGRU(Model): def forward(self, inputs): emb = self.embedding(inputs) - emb = fluid.layers.reshape(emb, shape=[self.batch_size, -1, self.hid_dim]) + emb = fluid.layers.reshape( + emb, shape=[self.batch_size, -1, self.hid_dim]) fc_1 = self._fc1(emb) encoded_vector = self._encoder(fc_1) encoded_vector = fluid.layers.tanh(encoded_vector) diff --git a/examples/sentiment_classification/sentiment_classifier.py b/examples/sentiment_classification/sentiment_classifier.py index b5f6a0d..7bd2d6a 100644 --- a/examples/sentiment_classification/sentiment_classifier.py +++ b/examples/sentiment_classification/sentiment_classifier.py @@ -13,14 +13,13 @@ # limitations under the License. """Sentiment Classification in Paddle Dygraph Mode. 
""" - from __future__ import print_function import numpy as np import paddle.fluid as fluid -from hapi.model import set_device, Model, CrossEntropy, Input -from hapi.configure import Config -from hapi.text.senta import SentaProcessor -from hapi.metrics import Accuracy +from paddle.incubate.hapi.model import set_device, Model, CrossEntropy, Input +from paddle.incubate.hapi.configure import Config +from paddle.incubate.hapi.text.senta import SentaProcessor +from paddle.incubate.hapi.metrics import Accuracy from models import CNN, BOW, GRU, BiGRU import json import os @@ -32,12 +31,14 @@ args.Print() device = set_device("gpu" if args.use_cuda else "cpu") dev_count = fluid.core.get_cuda_device_count() if args.use_cuda else 1 + def main(): if args.do_train: train() elif args.do_infer: infer() + def train(): fluid.enable_dygraph(device) processor = SentaProcessor( @@ -66,31 +67,28 @@ def train(): epoch=args.epoch, shuffle=False) if args.model_type == 'cnn_net': - model = CNN( args.vocab_size, args.batch_size, - args.padding_size) + model = CNN(args.vocab_size, args.batch_size, args.padding_size) elif args.model_type == 'bow_net': - model = BOW( args.vocab_size, args.batch_size, - args.padding_size) + model = BOW(args.vocab_size, args.batch_size, args.padding_size) elif args.model_type == 'gru_net': - model = GRU( args.vocab_size, args.batch_size, - args.padding_size) + model = GRU(args.vocab_size, args.batch_size, args.padding_size) elif args.model_type == 'bigru_net': - model = BiGRU( args.vocab_size, args.batch_size, - args.padding_size) - - optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr, parameter_list=model.parameters()) - + model = BiGRU(args.vocab_size, args.batch_size, args.padding_size) + + optimizer = fluid.optimizer.Adagrad( + learning_rate=args.lr, parameter_list=model.parameters()) + inputs = [Input([None, None], 'int64', name='doc')] labels = [Input([None, 1], 'int64', name='label')] - + model.prepare( optimizer, CrossEntropy(), - Accuracy(topk=(1,)), + Accuracy(topk=(1, )), inputs, labels, device=device) - + model.fit(train_data=train_data_generator, eval_data=eval_data_generator, batch_size=args.batch_size, @@ -99,6 +97,7 @@ def train(): eval_freq=args.eval_freq, save_freq=args.save_freq) + def infer(): fluid.enable_dygraph(device) processor = SentaProcessor( @@ -114,38 +113,37 @@ def infer(): epoch=1, shuffle=False) if args.model_type == 'cnn_net': - model_infer = CNN( args.vocab_size, args.batch_size, - args.padding_size) + model_infer = CNN(args.vocab_size, args.batch_size, args.padding_size) elif args.model_type == 'bow_net': - model_infer = BOW( args.vocab_size, args.batch_size, - args.padding_size) + model_infer = BOW(args.vocab_size, args.batch_size, args.padding_size) elif args.model_type == 'gru_net': - model_infer = GRU( args.vocab_size, args.batch_size, - args.padding_size) + model_infer = GRU(args.vocab_size, args.batch_size, args.padding_size) elif args.model_type == 'bigru_net': - model_infer = BiGRU( args.vocab_size, args.batch_size, - args.padding_size) - + model_infer = BiGRU(args.vocab_size, args.batch_size, + args.padding_size) + print('Do inferring ...... 
') inputs = [Input([None, None], 'int64', name='doc')] model_infer.prepare( - None, - CrossEntropy(), - Accuracy(topk=(1,)), - inputs, - device=device) + None, CrossEntropy(), Accuracy(topk=(1, )), inputs, device=device) model_infer.load(args.checkpoints, reset_optimizer=True) preds = model_infer.predict(test_data=infer_data_generator) preds = np.array(preds[0]).reshape((-1, 2)) if args.output_dir: with open(os.path.join(args.output_dir, 'predictions.json'), 'w') as w: - + for p in range(len(preds)): label = np.argmax(preds[p]) - result = json.dumps({'index': p, 'label': label, 'probs': preds[p].tolist()}) - w.write(result+'\n') - print('Predictions saved at '+os.path.join(args.output_dir, 'predictions.json')) + result = json.dumps({ + 'index': p, + 'label': label, + 'probs': preds[p].tolist() + }) + w.write(result + '\n') + print('Predictions saved at ' + os.path.join(args.output_dir, + 'predictions.json')) + if __name__ == '__main__': main() diff --git a/examples/seq2seq/predict.py b/examples/seq2seq/predict.py index 930c2e5..db8aef1 100644 --- a/examples/seq2seq/predict.py +++ b/examples/seq2seq/predict.py @@ -23,7 +23,7 @@ import paddle.fluid as fluid from paddle.fluid.layers.utils import flatten from paddle.fluid.io import DataLoader -from hapi.model import Input, set_device +from paddle.incubate.hapi.model import Input, set_device from args import parse_args from seq2seq_base import BaseInferModel from seq2seq_attn import AttentionInferModel diff --git a/examples/seq2seq/seq2seq_attn.py b/examples/seq2seq/seq2seq_attn.py index ce9cc08..37c84cb 100644 --- a/examples/seq2seq/seq2seq_attn.py +++ b/examples/seq2seq/seq2seq_attn.py @@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer from paddle.fluid.dygraph import Embedding, Linear, Layer from paddle.fluid.layers import BeamSearchDecoder -from hapi.model import Model, Loss -from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell +from paddle.incubate.hapi.model import Model, Loss +from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell from seq2seq_base import Encoder diff --git a/examples/seq2seq/seq2seq_base.py b/examples/seq2seq/seq2seq_base.py index c28e2dc..6342ca1 100644 --- a/examples/seq2seq/seq2seq_base.py +++ b/examples/seq2seq/seq2seq_base.py @@ -19,8 +19,8 @@ from paddle.fluid.initializer import UniformInitializer from paddle.fluid.dygraph import Embedding, Linear, Layer from paddle.fluid.layers import BeamSearchDecoder -from hapi.model import Model, Loss -from hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell +from paddle.incubate.hapi.model import Model, Loss +from paddle.incubate.hapi.text import DynamicDecode, RNN, BasicLSTMCell, RNNCell class CrossEntropyCriterion(Loss): diff --git a/examples/seq2seq/train.py b/examples/seq2seq/train.py index 55a31d3..cd4fab9 100644 --- a/examples/seq2seq/train.py +++ b/examples/seq2seq/train.py @@ -21,7 +21,7 @@ import numpy as np import paddle.fluid as fluid from paddle.fluid.io import DataLoader -from hapi.model import Input, set_device +from paddle.incubate.hapi.model import Input, set_device from args import parse_args from seq2seq_base import BaseModel, CrossEntropyCriterion from seq2seq_attn import AttentionModel diff --git a/examples/seq2seq/utility.py b/examples/seq2seq/utility.py index aa0dd4a..95a38ff 100644 --- a/examples/seq2seq/utility.py +++ b/examples/seq2seq/utility.py @@ -16,8 +16,8 @@ import math import paddle.fluid as fluid -from hapi.metrics import Metric -from hapi.callbacks import ProgBarLogger +from 
paddle.incubate.hapi.metrics import Metric +from paddle.incubate.hapi.callbacks import ProgBarLogger class TrainCallback(ProgBarLogger): diff --git a/examples/sequence_tagging/eval.py b/examples/sequence_tagging/eval.py index f58337b..2520d95 100644 --- a/examples/sequence_tagging/eval.py +++ b/examples/sequence_tagging/eval.py @@ -28,11 +28,11 @@ import numpy as np work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(work_dir, "../")) -from hapi.model import set_device, Input -from hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss -from hapi.text.sequence_tagging import LacDataset, LacDataLoader -from hapi.text.sequence_tagging import check_gpu, check_version -from hapi.text.sequence_tagging import PDConfig +from paddle.incubate.hapi.model import set_device, Input +from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, ChunkEval, LacLoss +from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader +from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version +from paddle.incubate.hapi.text.sequence_tagging import PDConfig import paddle.fluid as fluid from paddle.fluid.layers.utils import flatten @@ -65,7 +65,8 @@ def main(args): device=place) model.load(args.init_from_checkpoint, skip_mismatch=True) - eval_result = model.evaluate(eval_dataset.dataloader, batch_size=args.batch_size) + eval_result = model.evaluate( + eval_dataset.dataloader, batch_size=args.batch_size) print("precison: %.5f" % (eval_result["precision"][0])) print("recall: %.5f" % (eval_result["recall"][0])) print("F1: %.5f" % (eval_result["F1"][0])) diff --git a/examples/sequence_tagging/predict.py b/examples/sequence_tagging/predict.py index e8802e4..5b1620d 100644 --- a/examples/sequence_tagging/predict.py +++ b/examples/sequence_tagging/predict.py @@ -29,11 +29,11 @@ import numpy as np work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(work_dir, "../")) -from hapi.text.sequence_tagging import SeqTagging -from hapi.model import Input, set_device -from hapi.text.sequence_tagging import LacDataset, LacDataLoader -from hapi.text.sequence_tagging import check_gpu, check_version -from hapi.text.sequence_tagging import PDConfig +from paddle.incubate.hapi.text.sequence_tagging import SeqTagging +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader +from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version +from paddle.incubate.hapi.text.sequence_tagging import PDConfig import paddle.fluid as fluid from paddle.fluid.layers.utils import flatten diff --git a/examples/sequence_tagging/train.py b/examples/sequence_tagging/train.py index 56507ad..69b76b1 100644 --- a/examples/sequence_tagging/train.py +++ b/examples/sequence_tagging/train.py @@ -28,11 +28,11 @@ import numpy as np work_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(work_dir, "../")) -from hapi.model import Input, set_device -from hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval -from hapi.text.sequence_tagging import LacDataset, LacDataLoader -from hapi.text.sequence_tagging import check_gpu, check_version -from hapi.text.sequence_tagging import PDConfig +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.text.sequence_tagging import SeqTagging, LacLoss, ChunkEval +from 
paddle.incubate.hapi.text.sequence_tagging import LacDataset, LacDataLoader +from paddle.incubate.hapi.text.sequence_tagging import check_gpu, check_version +from paddle.incubate.hapi.text.sequence_tagging import PDConfig import paddle.fluid as fluid from paddle.fluid.optimizer import AdamOptimizer diff --git a/examples/style-transfer/README.md b/examples/style-transfer/README.md index 84b57bd..46b2b30 100644 --- a/examples/style-transfer/README.md +++ b/examples/style-transfer/README.md @@ -32,10 +32,10 @@ gram_matrix = fluid.layers.matmul(tensor, fluid.layers.transpose(tensor, [1, 0]) import numpy as np import matplotlib.pyplot as plt -from hapi.model import Model, Loss +from paddle.incubate.hapi.model import Model, Loss -from hapi.vision.models import vgg16 -from hapi.vision.transforms import transforms +from paddle.incubate.hapi.vision.models import vgg16 +from paddle.incubate.hapi.vision.transforms import transforms from paddle import fluid from paddle.fluid.io import Dataset diff --git a/examples/style-transfer/style_transfer.py b/examples/style-transfer/style_transfer.py index ad0716f..854ca9c 100644 --- a/examples/style-transfer/style_transfer.py +++ b/examples/style-transfer/style_transfer.py @@ -3,10 +3,10 @@ import argparse import numpy as np import matplotlib.pyplot as plt -from hapi.model import Model, Loss +from paddle.incubate.hapi.model import Model, Loss -from hapi.vision.models import vgg16 -from hapi.vision.transforms import transforms +from paddle.incubate.hapi.vision.models import vgg16 +from paddle.incubate.hapi.vision.transforms import transforms from paddle import fluid from paddle.fluid.io import Dataset diff --git a/examples/transformer/predict.py b/examples/transformer/predict.py index f99bf77..5521d6c 100644 --- a/examples/transformer/predict.py +++ b/examples/transformer/predict.py @@ -25,7 +25,7 @@ from paddle.fluid.layers.utils import flatten from utils.configure import PDConfig from utils.check import check_gpu, check_version -from hapi.model import Input, set_device +from paddle.incubate.hapi.model import Input, set_device from reader import prepare_infer_input, Seq2SeqDataset, Seq2SeqBatchSampler from transformer import InferTransformer diff --git a/examples/transformer/train.py b/examples/transformer/train.py index 39bee1d..90f54d0 100644 --- a/examples/transformer/train.py +++ b/examples/transformer/train.py @@ -23,8 +23,8 @@ from paddle.io import DataLoader from utils.configure import PDConfig from utils.check import check_gpu, check_version -from hapi.model import Input, set_device -from hapi.callbacks import ProgBarLogger +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.callbacks import ProgBarLogger from reader import create_data_loader from transformer import Transformer, CrossEntropyCriterion diff --git a/examples/transformer/transformer.py b/examples/transformer/transformer.py index 30bb931..02405d0 100644 --- a/examples/transformer/transformer.py +++ b/examples/transformer/transformer.py @@ -20,8 +20,8 @@ import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.dygraph import Embedding, LayerNorm, Linear, Layer, to_variable from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay -from hapi.model import Model, CrossEntropy, Loss -from hapi.text import TransformerBeamSearchDecoder, DynamicDecode +from paddle.incubate.hapi.model import Model, CrossEntropy, Loss +from paddle.incubate.hapi.text import TransformerBeamSearchDecoder, DynamicDecode def 
position_encoding_init(n_position, d_pos_vec): diff --git a/examples/tsm/infer.py b/examples/tsm/infer.py index cac9745..2351eb2 100644 --- a/examples/tsm/infer.py +++ b/examples/tsm/infer.py @@ -19,8 +19,8 @@ import os import argparse import numpy as np -from hapi.model import Input, set_device -from hapi.vision.transforms import Compose +from paddle.incubate.hapi.model import Input, set_device +from paddle.incubate.hapi.vision.transforms import Compose from check import check_gpu, check_version from modeling import tsm_resnet50 @@ -36,18 +36,16 @@ def main(): device = set_device(FLAGS.device) fluid.enable_dygraph(device) if FLAGS.dynamic else None - transform = Compose([GroupScale(), - GroupCenterCrop(), - NormalizeImage()]) + transform = Compose([GroupScale(), GroupCenterCrop(), NormalizeImage()]) dataset = KineticsDataset( - pickle_file=FLAGS.infer_file, - label_list=FLAGS.label_list, - mode='test', - transform=transform) + pickle_file=FLAGS.infer_file, + label_list=FLAGS.label_list, + mode='test', + transform=transform) labels = dataset.label_list - model = tsm_resnet50(num_classes=len(labels), - pretrained=FLAGS.weights is None) + model = tsm_resnet50( + num_classes=len(labels), pretrained=FLAGS.weights is None) inputs = [Input([None, 8, 3, 224, 224], 'float32', name='image')] @@ -66,19 +64,23 @@ def main(): if __name__ == '__main__': parser = argparse.ArgumentParser("CNN training on TSM") parser.add_argument( - "--data", type=str, default='dataset/kinetics', + "--data", + type=str, + default='dataset/kinetics', help="path to dataset root directory") parser.add_argument( - "--device", type=str, default='gpu', - help="device to use, gpu or cpu") + "--device", type=str, default='gpu', help="device to use, gpu or cpu") parser.add_argument( - "-d", "--dynamic", action='store_true', - help="enable dygraph mode") + "-d", "--dynamic", action='store_true', help="enable dygraph mode") parser.add_argument( - "--label_list", type=str, default=None, + "--label_list", + type=str, + default=None, help="path to category index label list file") parser.add_argument( - "--infer_file", type=str, default=None, + "--infer_file", + type=str, + default=None, help="path to pickle file for inference") parser.add_argument( "-w", diff --git a/examples/tsm/main.py b/examples/tsm/main.py index deef9f6..d292d7d 100644 --- a/examples/tsm/main.py +++ b/examples/tsm/main.py @@ -22,10 +22,10 @@ import numpy as np from paddle import fluid from paddle.fluid.dygraph.parallel import ParallelEnv -from hapi.model import Model, Input, set_device -from hapi.loss import CrossEntropy -from hapi.metrics import Accuracy -from hapi.vision.transforms import Compose +from paddle.incubate.hapi.model import Model, Input, set_device +from paddle.incubate.hapi.loss import CrossEntropy +from paddle.incubate.hapi.metrics import Accuracy +from paddle.incubate.hapi.vision.transforms import Compose from modeling import tsm_resnet50 from check import check_gpu, check_version diff --git a/examples/tsm/modeling.py b/examples/tsm/modeling.py index aafd6a4..a42f426 100644 --- a/examples/tsm/modeling.py +++ b/examples/tsm/modeling.py @@ -17,8 +17,8 @@ import paddle.fluid as fluid from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear -from hapi.model import Model -from hapi.download import get_weights_path_from_url +from paddle.incubate.hapi.model import Model +from paddle.incubate.hapi.download import get_weights_path_from_url __all__ = ["TSM_ResNet", "tsm_resnet50"] diff --git 
a/examples/yolov3/infer.py b/examples/yolov3/infer.py index 7b6976c..a2640fe 100644 --- a/examples/yolov3/infer.py +++ b/examples/yolov3/infer.py @@ -18,13 +18,13 @@ from __future__ import print_function import os import argparse import numpy as np -from PIL import Image +from PIL import Image from paddle import fluid from paddle.fluid.optimizer import Momentum from paddle.io import DataLoader -from hapi.model import Model, Input, set_device +from paddle.incubate.hapi.model import Model, Input, set_device from modeling import yolov3_darknet53, YoloLoss from transforms import * @@ -64,16 +64,20 @@ def load_labels(label_list, with_background=True): def main(): device = set_device(FLAGS.device) fluid.enable_dygraph(device) if FLAGS.dynamic else None - - inputs = [Input([None, 1], 'int64', name='img_id'), - Input([None, 2], 'int32', name='img_shape'), - Input([None, 3, None, None], 'float32', name='image')] + + inputs = [ + Input( + [None, 1], 'int64', name='img_id'), Input( + [None, 2], 'int32', name='img_shape'), Input( + [None, 3, None, None], 'float32', name='image') + ] cat2name = load_labels(FLAGS.label_list, with_background=False) - model = yolov3_darknet53(num_classes=len(cat2name), - model_mode='test', - pretrained=FLAGS.weights is None) + model = yolov3_darknet53( + num_classes=len(cat2name), + model_mode='test', + pretrained=FLAGS.weights is None) model.prepare(inputs=inputs, device=FLAGS.device) @@ -82,7 +86,7 @@ def main(): # image preprocess orig_img = Image.open(FLAGS.infer_image).convert('RGB') - w, h = orig_img.size + w, h = orig_img.size img = orig_img.resize((608, 608), Image.BICUBIC) img = np.array(img).astype('float32') / 255.0 img -= np.array(IMAGE_MEAN) @@ -106,19 +110,33 @@ if __name__ == '__main__': parser.add_argument( "-d", "--dynamic", action='store_true', help="enable dygraph mode") parser.add_argument( - "--label_list", type=str, default=None, + "--label_list", + type=str, + default=None, help="path to category label list file") parser.add_argument( - "-t", "--draw_threshold", type=float, default=0.5, + "-t", + "--draw_threshold", + type=float, + default=0.5, help="threshold to reserve the result for visualization") parser.add_argument( - "-i", "--infer_image", type=str, default=None, + "-i", + "--infer_image", + type=str, + default=None, help="image path for inference") parser.add_argument( - "-o", "--output_dir", type=str, default='output', + "-o", + "--output_dir", + type=str, + default='output', help="directory to save inference result if --visualize is set") parser.add_argument( - "-w", "--weights", default=None, type=str, + "-w", + "--weights", + default=None, + type=str, help="path to weights for inference") FLAGS = parser.parse_args() print_arguments(FLAGS) diff --git a/examples/yolov3/main.py b/examples/yolov3/main.py index e3c773f..f90ae4f 100644 --- a/examples/yolov3/main.py +++ b/examples/yolov3/main.py @@ -25,9 +25,9 @@ from paddle import fluid from paddle.fluid.optimizer import Momentum from paddle.io import DataLoader -from hapi.model import Model, Input, set_device -from hapi.distributed import DistributedBatchSampler -from hapi.vision.transforms import Compose, BatchCompose +from paddle.incubate.hapi.model import Model, Input, set_device +from paddle.incubate.hapi.distributed import DistributedBatchSampler +from paddle.incubate.hapi.vision.transforms import Compose, BatchCompose from modeling import yolov3_darknet53, YoloLoss from coco import COCODataset diff --git a/examples/yolov3/modeling.py b/examples/yolov3/modeling.py index 982c0be..3beb977 
100644 --- a/examples/yolov3/modeling.py +++ b/examples/yolov3/modeling.py @@ -23,7 +23,7 @@ from paddle.fluid.regularizer import L2Decay from hapi.model import Model from hapi.loss import Loss from hapi.download import get_weights_path_from_url -from hapi.vision.models import darknet53 +from darknet import darknet53 __all__ = ['YoloLoss', 'YOLOv3', 'yolov3_darknet53'] -- GitLab
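
For reference, a minimal sketch of the import migration this patch applies, assuming a Paddle build that ships the paddle.incubate.hapi package (the exact module paths below are taken from the hunks above):

    # before this patch: hapi installed as a standalone package
    # from hapi.model import Input, set_device
    # from hapi.metrics import Accuracy

    # after this patch: hapi vendored under paddle.incubate
    from paddle.incubate.hapi.model import Input, set_device
    from paddle.incubate.hapi.metrics import Accuracy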