diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..c1a58e1ea9d5b58996ec5f1306992af37cf9d581 --- /dev/null +++ b/.gitmodules @@ -0,0 +1,9 @@ +[submodule "fluid/SimNet"] + path = fluid/SimNet + url = https://github.com/baidu/AnyQ.git +[submodule "fluid/LAC"] + path = fluid/LAC + url = https://github.com/baidu/lac +[submodule "fluid/Senta"] + path = fluid/Senta + url = https://github.com/baidu/Senta diff --git a/README.md b/README.md index b7734fde853176a00e0422c96e1fcf1f3896fe6a..f08da24e2f6f0e6d2c7e3632bf27da3e0c20565e 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ PaddlePaddle provides a rich set of computational units to enable users to adopt - [fluid models](fluid): use PaddlePaddle's Fluid APIs. We especially recommend users to use Fluid models. -- [v2 models](v2): use PaddlePaddle's v2 APIs. +- [legacy models](legacy): use PaddlePaddle's v2 APIs. ## License diff --git a/fluid/DeepASR/model_utils/model.py b/fluid/DeepASR/model_utils/model.py index 8ae7e66fc781226c0316fceee65cfb805a1770ba..31892a7ad93eed0ba6ad6e7b53377e897f6df29d 100644 --- a/fluid/DeepASR/model_utils/model.py +++ b/fluid/DeepASR/model_utils/model.py @@ -2,7 +2,6 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function -import paddle.v2 as paddle import paddle.fluid as fluid diff --git a/fluid/LAC b/fluid/LAC new file mode 160000 index 0000000000000000000000000000000000000000..66660503bb6e8f34adc4715ccf42cad77ed46ded --- /dev/null +++ b/fluid/LAC @@ -0,0 +1 @@ +Subproject commit 66660503bb6e8f34adc4715ccf42cad77ed46ded diff --git a/fluid/Senta b/fluid/Senta new file mode 160000 index 0000000000000000000000000000000000000000..870651e257750f2c237f0b0bc9a27e5d062d1909 --- /dev/null +++ b/fluid/Senta @@ -0,0 +1 @@ +Subproject commit 870651e257750f2c237f0b0bc9a27e5d062d1909 diff --git a/fluid/SimNet b/fluid/SimNet new file mode 160000 index 0000000000000000000000000000000000000000..4dbe7f7b0e76c188eb7f448d104f0165f0a12229 --- /dev/null +++ b/fluid/SimNet @@ -0,0 +1 @@ +Subproject commit 4dbe7f7b0e76c188eb7f448d104f0165f0a12229 diff --git a/fluid/adversarial/tutorials/mnist_model.py b/fluid/adversarial/tutorials/mnist_model.py index 81ff7bdec7bedde2e5d1d1013ad95841cb766510..b1ebb0f88752df4c18ddd4ad96725f636bf261fc 100644 --- a/fluid/adversarial/tutorials/mnist_model.py +++ b/fluid/adversarial/tutorials/mnist_model.py @@ -1,7 +1,7 @@ """ CNN on mnist data using fluid api of paddlepaddle """ -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid diff --git a/fluid/adversarial/tutorials/mnist_tutorial_bim.py b/fluid/adversarial/tutorials/mnist_tutorial_bim.py index b490eba302106cf80df009d30e2babe48af465df..0524b908ea9ed028cf03aa9621c08fb8ef0cfc79 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_bim.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_bim.py @@ -8,7 +8,7 @@ sys.path.append("..") import matplotlib.pyplot as plt import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.gradient_method import BIM diff --git a/fluid/adversarial/tutorials/mnist_tutorial_deepfool.py b/fluid/adversarial/tutorials/mnist_tutorial_deepfool.py index 2b12c81945859b42809e33ccd74ead53f4d4eb05..74ab5e8040022f4df96dd97fc77da9dc920d8f2b 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_deepfool.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_deepfool.py @@ -8,7 +8,7 @@ sys.path.append("..") import matplotlib.pyplot as plt 
import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.deepfool import DeepFoolAttack diff --git a/fluid/adversarial/tutorials/mnist_tutorial_fgsm.py b/fluid/adversarial/tutorials/mnist_tutorial_fgsm.py index eeb7bc477ed090eac547fe3db50b08b2a513f0d7..178fc146dd636dce8fa2f82552a996dca239c55a 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_fgsm.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_fgsm.py @@ -8,7 +8,7 @@ sys.path.append("..") import matplotlib.pyplot as plt import numpy as np import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.gradient_method import FGSM diff --git a/fluid/adversarial/tutorials/mnist_tutorial_ilcm.py b/fluid/adversarial/tutorials/mnist_tutorial_ilcm.py index 3d155e583415962f62ee7f581d32dd57a6b1cc1b..b12ffaab0367769d9bf9d58ec7396c8edd2487e9 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_ilcm.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_ilcm.py @@ -7,7 +7,7 @@ sys.path.append("..") import matplotlib.pyplot as plt import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.gradient_method import ILCM diff --git a/fluid/adversarial/tutorials/mnist_tutorial_jsma.py b/fluid/adversarial/tutorials/mnist_tutorial_jsma.py index 070d2f5f5e3bcd50cdfb12f67e7c1a9453f31676..98829ec33afa1abc7646ac9297ec82c3de9b9eff 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_jsma.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_jsma.py @@ -7,7 +7,7 @@ sys.path.append("..") import matplotlib.pyplot as plt import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.saliency import JSMA diff --git a/fluid/adversarial/tutorials/mnist_tutorial_lbfgs.py b/fluid/adversarial/tutorials/mnist_tutorial_lbfgs.py index 9b16c32bb6543409c487b31fe80d8cdc162b55d1..ba120d9d151573878372e394d2a03d93efccb4e9 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_lbfgs.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_lbfgs.py @@ -7,7 +7,7 @@ sys.path.append("..") import matplotlib.pyplot as plt import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.lbfgs import LBFGS diff --git a/fluid/adversarial/tutorials/mnist_tutorial_mifgsm.py b/fluid/adversarial/tutorials/mnist_tutorial_mifgsm.py index ded7ef4b19cd4d99d2c3143f703e3d594058f705..8fc84db8f673c8da8eebf8b9d96f41a8712146c8 100644 --- a/fluid/adversarial/tutorials/mnist_tutorial_mifgsm.py +++ b/fluid/adversarial/tutorials/mnist_tutorial_mifgsm.py @@ -9,7 +9,7 @@ sys.path.append("..") import matplotlib.pyplot as plt import numpy as np import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from advbox.adversary import Adversary from advbox.attacks.gradient_method import MIFGSM diff --git a/fluid/deeplabv3+/.gitignore b/fluid/deeplabv3+/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..d086de2dafc52aa312b186bd593211be6f4ee60c --- /dev/null +++ b/fluid/deeplabv3+/.gitignore @@ -0,0 +1,3 @@ +deeplabv3plus_xception65_initialize.params +deeplabv3plus.params +deeplabv3plus.tar.gz diff --git a/fluid/deeplabv3+/README.md b/fluid/deeplabv3+/README.md index ccb38689d8db14586cc4b3608eed9be752829e31..9ff68ab8c1ded0eb41078886aac7a1ec49f02355 100644 --- a/fluid/deeplabv3+/README.md +++ b/fluid/deeplabv3+/README.md @@ -1,4 +1,4 
@@ -DeepLab: running the demo in this directory requires the latest develop version of PaddlePaddle. If your installed PaddlePaddle version is lower than this requirement, please follow the instructions in the [installation guide](http://www.paddlepaddle.org/docs/develop/documentation/zh/build_and_install/pip_install_cn.html) to update your PaddlePaddle installation. +DeepLab: running the demo in this directory requires PaddlePaddle Fluid v1.0.0 or above. If your installed PaddlePaddle version is lower than this requirement, please follow the instructions in the installation guide to update your PaddlePaddle installation. If a GPU is used, the program also requires cuDNN v7. ## Code structure @@ -41,10 +41,12 @@ data/cityscape/ To train the model from scratch, download our initialization model: ``` wget http://paddlemodels.cdn.bcebos.com/deeplab/deeplabv3plus_xception65_initialize.tar.gz +tar -xf deeplabv3plus_xception65_initialize.tar.gz && rm deeplabv3plus_xception65_initialize.tar.gz ``` To fine-tune the final trained model or use it directly for prediction, download our final model: ``` wget http://paddlemodels.cdn.bcebos.com/deeplab/deeplabv3plus.tar.gz +tar -xf deeplabv3plus.tar.gz && rm deeplabv3plus.tar.gz ``` @@ -70,11 +72,11 @@ python train.py --help ``` python ./train.py \ --batch_size=8 \ - --parallel=true + --parallel=true \ --train_crop_size=769 \ --total_step=90000 \ - --init_weights_path=$INIT_WEIGHTS_PATH \ - --save_weights_path=$SAVE_WEIGHTS_PATH \ + --init_weights_path=deeplabv3plus_xception65_initialize.params \ + --save_weights_path=output \ --dataset_path=$DATASET_PATH ``` @@ -82,11 +84,10 @@ python ./train.py \ Run the following command to test on the `Cityscape` test dataset: ``` python ./eval.py \ - --init_weights_path=$INIT_WEIGHTS_PATH \ + --init_weights=deeplabv3plus.params \ --dataset_path=$DATASET_PATH ``` -The model file must be specified via the `--model_path` option. -The evaluation metric output by the test script is [mean IoU](). +The model file must be specified via the `--model_path` option. The evaluation metric output by the test script is mean IoU. ## Experimental results diff --git a/fluid/deeplabv3+/eval.py b/fluid/deeplabv3+/eval.py index 2227a61836efe31a9120b5e68f74e6e177beaf92..624159a54d3ff55e29d9f5ac71c673e5e396d9e7 100644 --- a/fluid/deeplabv3+/eval.py +++ b/fluid/deeplabv3+/eval.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import os os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98' @@ -91,7 +94,7 @@ exe = fluid.Executor(place) exe.run(sp) if args.init_weights_path: - print "load from:", args.init_weights_path + print("load from:", args.init_weights_path) load_model() dataset = CityscapeDataset(args.dataset_path, 'val') @@ -118,7 +121,7 @@ for i, imgs, labels, names in batches: mp = (wrong + right) != 0 miou2 = np.mean((right[mp] * 1.0 / (right[mp] + wrong[mp]))) if args.verbose: - print 'step: %s, mIoU: %s' % (i + 1, miou2) + print('step: %s, mIoU: %s' % (i + 1, miou2)) else: - print '\rstep: %s, mIoU: %s' % (i + 1, miou2), + print('\rstep: %s, mIoU: %s' % (i + 1, miou2)) sys.stdout.flush() diff --git a/fluid/deeplabv3+/models.py b/fluid/deeplabv3+/models.py index 515d22f1092257c25497d240200437ed6647098f..77a1a005ebbd0b41e0d2fb2a5ad514e80be910cf 100644 --- a/fluid/deeplabv3+/models.py +++ b/fluid/deeplabv3+/models.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import paddle import paddle.fluid as fluid @@ -50,7 +53,7 @@ def append_op_result(result, name): def conv(*args, **kargs): kargs['param_attr'] = name_scope + 'weights' - if kargs.has_key('bias_attr') and kargs['bias_attr']: + if 'bias_attr' in kargs and kargs['bias_attr']: kargs['bias_attr'] = name_scope + 'biases' else: kargs['bias_attr'] = False @@ -62,7 +65,7 @@ def group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None): N, C, H, W = input.shape if C % G != 0: - print "group can not divide channle:", C, G + print("group can not divide channel:", C, G) for d in range(10): for t in [d, -d]: if G + t <= 0: continue @@ -70,7 +73,7 @@ def
group_norm(input, G, eps=1e-5, param_attr=None, bias_attr=None): G = G + t break if C % G == 0: - print "use group size:", G + print("use group size:", G) break assert C % G == 0 param_shape = (G, ) @@ -139,7 +142,7 @@ def seq_conv(input, channel, stride, filter, dilation=1, act=None): filter, stride, groups=input.shape[1], - padding=(filter / 2) * dilation, + padding=(filter // 2) * dilation, dilation=dilation) input = bn(input) if act: input = act(input) diff --git a/fluid/deeplabv3+/reader.py b/fluid/deeplabv3+/reader.py index f4141cbb2a156d98383714f85d73d84308c95c17..d420f0a264ba26be00cbe0d1d36130d565d7030d 100644 --- a/fluid/deeplabv3+/reader.py +++ b/fluid/deeplabv3+/reader.py @@ -1,5 +1,10 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import cv2 import numpy as np +import os +import six default_config = { "shuffle": True, @@ -30,7 +35,7 @@ def slice_with_pad(a, s, value=0): pr = 0 pads.append([pl, pr]) slices.append([l, r]) - slices = map(lambda x: slice(x[0], x[1], 1), slices) + slices = list(map(lambda x: slice(x[0], x[1], 1), slices)) a = a[slices] a = np.pad(a, pad_width=pads, mode='constant', constant_values=value) return a @@ -38,11 +43,17 @@ def slice_with_pad(a, s, value=0): class CityscapeDataset: def __init__(self, dataset_dir, subset='train', config=default_config): - import commands - label_dirname = dataset_dir + 'gtFine/' + subset - label_files = commands.getoutput( - "find %s -type f | grep labelTrainIds | sort" % - label_dirname).splitlines() + label_dirname = os.path.join(dataset_dir, 'gtFine/' + subset) + if six.PY2: + import commands + label_files = commands.getoutput( + "find %s -type f | grep labelTrainIds | sort" % + label_dirname).splitlines() + else: + import subprocess + label_files = subprocess.getstatusoutput( + "find %s -type f | grep labelTrainIds | sort" % + label_dirname)[-1].splitlines() self.label_files = label_files self.label_dirname = label_dirname self.index = 0 @@ -50,7 +61,7 @@ class CityscapeDataset: self.dataset_dir = dataset_dir self.config = config self.reset() - print "total number", len(label_files) + print("total number", len(label_files)) def reset(self, shuffle=False): self.index = 0 @@ -66,13 +77,14 @@ class CityscapeDataset: shape = self.config["crop_size"] while True: ln = self.label_files[self.index] - img_name = self.dataset_dir + 'leftImg8bit/' + self.subset + ln[len( - self.label_dirname):] + img_name = os.path.join( + self.dataset_dir, + 'leftImg8bit/' + self.subset + ln[len(self.label_dirname):]) img_name = img_name.replace('gtFine_labelTrainIds', 'leftImg8bit') label = cv2.imread(ln) img = cv2.imread(img_name) if img is None: - print "load img failed:", img_name + print("load img failed:", img_name) self.next_img() else: break @@ -128,5 +140,7 @@ class CityscapeDataset: from prefetch_generator import BackgroundGenerator batches = BackgroundGenerator(batches, 100) except: - print "You can install 'prefetch_generator' for acceleration of data reading." + print( + "You can install 'prefetch_generator' for acceleration of data reading." 
+ ) return batches diff --git a/fluid/deeplabv3+/train.py b/fluid/deeplabv3+/train.py index d5dd2cff6925f400ad5c796cce041d44bf7a69c3..673db48c151fccbfd87d8a18942a830a12c8a2cd 100644 --- a/fluid/deeplabv3+/train.py +++ b/fluid/deeplabv3+/train.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import os os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.98' @@ -126,13 +129,12 @@ exe = fluid.Executor(place) exe.run(sp) if args.init_weights_path: - print "load from:", args.init_weights_path + print("load from:", args.init_weights_path) load_model() dataset = CityscapeDataset(args.dataset_path, 'train') if args.parallel: - print "Using ParallelExecutor." exe_p = fluid.ParallelExecutor( use_cuda=True, loss_name=loss_mean.name, main_program=tp) @@ -149,9 +151,9 @@ for i, imgs, labels, names in batches: 'label': labels}, fetch_list=[pred, loss_mean]) if i % 100 == 0: - print "Model is saved to", args.save_weights_path + print("Model is saved to", args.save_weights_path) save_model() - print "step %s, loss: %s" % (i, np.mean(retv[1])) + print("step %s, loss: %s" % (i, np.mean(retv[1]))) -print "Training done. Model is saved to", args.save_weights_path +print("Training done. Model is saved to", args.save_weights_path) save_model() diff --git a/fluid/face_detection/.gitignore b/fluid/face_detection/.gitignore index 0636bd5b2995e0a0fa27fe54be6ccbbb78074dca..bc0cb72626f7ba7db9334dd03cf9b24204ee0386 100644 --- a/fluid/face_detection/.gitignore +++ b/fluid/face_detection/.gitignore @@ -10,3 +10,4 @@ output* pred eval_tools box* +PyramidBox_WiderFace* diff --git a/fluid/face_detection/pyramidbox.py b/fluid/face_detection/pyramidbox.py index 4012d77cb7b99c36793807f173e38062f1b846ad..bece77af4247075a9bcd2e5ba5915e8446018e27 100644 --- a/fluid/face_detection/pyramidbox.py +++ b/fluid/face_detection/pyramidbox.py @@ -427,6 +427,7 @@ class PyramidBox(object): overlap_threshold=0.35, neg_overlap=0.35) loss = fluid.layers.reduce_sum(loss) + loss.persistable = True return loss def train(self): diff --git a/fluid/face_detection/reader.py b/fluid/face_detection/reader.py index ae90ad6b51db274446785dd6d9def57a606747d2..ea0f293dbfae57920fb9fdd2427ff273b90f8db9 100644 --- a/fluid/face_detection/reader.py +++ b/fluid/face_detection/reader.py @@ -285,7 +285,8 @@ def train(settings, try: enqueuer = GeneratorEnqueuer( train_generator(settings, file_list, batch_size, shuffle), - use_multiprocessing=use_multiprocessing) + use_multiprocessing=use_multiprocessing, + wait_time=0.5) enqueuer.start(max_queue_size=max_queue, workers=num_workers) generator_output = None while True: @@ -294,7 +295,7 @@ def train(settings, generator_output = enqueuer.queue.get() break else: - time.sleep(0.02) + time.sleep(0.5) yield generator_output generator_output = None finally: diff --git a/fluid/face_detection/train.py b/fluid/face_detection/train.py index 13744562c9d1814d457af20e3185d2d3c7a22fb7..67cec03b95ba5ffe1a5230c287bd12a49b90bb34 100644 --- a/fluid/face_detection/train.py +++ b/fluid/face_detection/train.py @@ -167,7 +167,7 @@ def train(args, config, train_params, train_file_list): shutil.rmtree(model_path) print('save models to %s' % (model_path)) - fluid.io.save_persistables(exe, model_path) + fluid.io.save_persistables(exe, model_path, main_program=program) train_py_reader.start() try: @@ -189,13 +189,13 @@ def train(args, config, train_params, train_file_list): fetch_vars = [np.mean(np.array(v)) for v in fetch_vars] if batch_id % 10 == 0: if not 
args.use_pyramidbox: - print("Pass {0}, batch {1}, loss {2}, time {3}".format( + print("Pass {:d}, batch {:d}, loss {:.6f}, time {:.5f}".format( pass_id, batch_id, fetch_vars[0], start_time - prev_start_time)) else: - print("Pass {0}, batch {1}, face loss {2}, " \ - "head loss {3}, " \ - "time {4}".format(pass_id, + print("Pass {:d}, batch {:d}, face loss {:.6f}, " \ + "head loss {:.6f}, " \ + "time {:.5f}".format(pass_id, batch_id, fetch_vars[0], fetch_vars[1], start_time - prev_start_time)) if pass_id % 1 == 0 or pass_id == epoc_num - 1: diff --git a/fluid/face_detection/widerface_eval.py b/fluid/face_detection/widerface_eval.py index 2a1addd1ed3313f8bb472bde2dad7fe90dd1c591..dd3a1c059ab1a6d3ed28a2cc48fdd377ed980fad 100644 --- a/fluid/face_detection/widerface_eval.py +++ b/fluid/face_detection/widerface_eval.py @@ -82,9 +82,6 @@ def save_widerface_bboxes(image_path, bboxes_scores, output_dir): image_name = image_path.split('/')[-1] image_class = image_path.split('/')[-2] - image_name = image_name.encode('utf-8') - image_class = image_class.encode('utf-8') - odir = os.path.join(output_dir, image_class) if not os.path.exists(odir): os.makedirs(odir) diff --git a/fluid/faster_rcnn/profile.py b/fluid/faster_rcnn/profile.py index c59cad111209ea774cfdd3988eeb7bb5f3561334..fd316302bd1259ca84e9077e9a2702c7065ca19b 100644 --- a/fluid/faster_rcnn/profile.py +++ b/fluid/faster_rcnn/profile.py @@ -109,7 +109,7 @@ def train(cfg): for batch_id in range(iterations): start_time = time.time() - data = train_reader().next() + data = next(train_reader()) end_time = time.time() reader_time.append(end_time - start_time) start_time = time.time() diff --git a/fluid/faster_rcnn/roidbs.py b/fluid/faster_rcnn/roidbs.py index d5d0d22211b5e676d58aa40235348a25eac60666..81ca366b5a6391a0579e4eb3aff5ffa94cbca1c0 100644 --- a/fluid/faster_rcnn/roidbs.py +++ b/fluid/faster_rcnn/roidbs.py @@ -26,7 +26,6 @@ from __future__ import print_function from __future__ import unicode_literals import copy -import cPickle as pickle import logging import numpy as np import os diff --git a/fluid/gan/c_gan/c_gan.py b/fluid/gan/c_gan/c_gan.py index 5a8b6f7e8defc414ea1e5865fbc0e63f9ffe9e09..1184e9dc8e7c0c28e246a678ab057dacf1bc2fed 100644 --- a/fluid/gan/c_gan/c_gan.py +++ b/fluid/gan/c_gan/c_gan.py @@ -12,8 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. 
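+# NOTE: the __future__ imports added below (a pattern applied across this PR)
+# keep the script behaving identically under Python 2 and Python 3
+# (true division and the print() function).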
+from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import sys import os +import six import argparse import functools import matplotlib @@ -102,7 +106,7 @@ def train(args): noise_data = np.random.uniform( low=-1.0, high=1.0, size=[args.batch_size, NOISE_SIZE]).astype('float32') - real_image = np.array(map(lambda x: x[0], data)).reshape( + real_image = np.array(list(map(lambda x: x[0], data))).reshape( -1, 784).astype('float32') conditions_data = np.array([x[1] for x in data]).reshape( [-1, 1]).astype("float32") @@ -138,7 +142,7 @@ def train(args): d_loss_np = [d_loss_1[0][0], d_loss_2[0][0]] - for _ in xrange(NUM_TRAIN_TIMES_OF_DG): + for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG): noise_data = np.random.uniform( low=-1.0, high=1.0, size=[args.batch_size, NOISE_SIZE]).astype('float32') @@ -159,7 +163,7 @@ def train(args): total_images = np.concatenate([real_image, generated_images]) fig = plot(total_images) msg = "Epoch ID={0}\n Batch ID={1}\n D-Loss={2}\n DG-Loss={3}\n gen={4}".format( - pass_id, batch_id, d_loss_np, dg_loss_np, + pass_id, batch_id, np.mean(d_loss_np), dg_loss_np, check(generated_images)) print(msg) plt.title(msg) diff --git a/fluid/gan/c_gan/dc_gan.py b/fluid/gan/c_gan/dc_gan.py index cca135725b4cae64582515019b5e9bf051fc8e50..441d37ac55886606c5d20168a234ce81ea6c852e 100644 --- a/fluid/gan/c_gan/dc_gan.py +++ b/fluid/gan/c_gan/dc_gan.py @@ -12,11 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import sys import os import argparse import functools import matplotlib +import six import numpy as np import paddle import paddle.fluid as fluid @@ -98,7 +102,7 @@ def train(args): noise_data = np.random.uniform( low=-1.0, high=1.0, size=[args.batch_size, NOISE_SIZE]).astype('float32') - real_image = np.array(map(lambda x: x[0], data)).reshape( + real_image = np.array(list(map(lambda x: x[0], data))).reshape( -1, 784).astype('float32') real_labels = np.ones( shape=[real_image.shape[0], 1], dtype='float32') @@ -128,7 +132,7 @@ def train(args): d_loss_np = [d_loss_1[0][0], d_loss_2[0][0]] - for _ in xrange(NUM_TRAIN_TIMES_OF_DG): + for _ in six.moves.xrange(NUM_TRAIN_TIMES_OF_DG): noise_data = np.random.uniform( low=-1.0, high=1.0, size=[args.batch_size, NOISE_SIZE]).astype('float32') @@ -146,7 +150,7 @@ def train(args): fig = plot(total_images) msg = "Epoch ID={0} Batch ID={1} D-Loss={2} DG-Loss={3}\n gen={4}".format( pass_id, batch_id, - np.sum(d_loss_np), dg_loss_np, check(generated_images)) + np.mean(d_loss_np), dg_loss_np, check(generated_images)) print(msg) plt.title(msg) plt.savefig( diff --git a/fluid/gan/c_gan/network.py b/fluid/gan/c_gan/network.py index 6a0dc073830e8dbeb28c7e9f96ae4795a0ab7fa8..93f021dbe35ca001a1793234fa7746a8ec7f57e3 100644 --- a/fluid/gan/c_gan/network.py +++ b/fluid/gan/c_gan/network.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import paddle import paddle.fluid as fluid from utility import get_parent_function_name @@ -104,13 +107,13 @@ def D_cond(image, y): def G_cond(z, y): s_h, s_w = output_height, output_width - s_h2, s_h4 = int(s_h / 2), int(s_h / 4) - s_w2, s_w4 = int(s_w / 2), int(s_w / 4) + s_h2, s_h4 = int(s_h // 2), int(s_h // 4) + s_w2, s_w4 = int(s_w // 2), int(s_w // 4) yb = fluid.layers.reshape(y, [-1, y_dim, 1, 1]) #NCHW 
z = fluid.layers.concat([z, y], 1) - h0 = bn(fc(z, gfc_dim / 2), act='relu') + h0 = bn(fc(z, gfc_dim // 2), act='relu') h0 = fluid.layers.concat([h0, y], 1) h1 = bn(fc(h0, gf_dim * 2 * s_h4 * s_w4), act='relu') @@ -134,8 +137,8 @@ def D(x): def G(x): x = bn(fc(x, gfc_dim)) - x = bn(fc(x, gf_dim * 2 * img_dim / 4 * img_dim / 4)) - x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim / 4, img_dim / 4]) + x = bn(fc(x, gf_dim * 2 * img_dim // 4 * img_dim // 4)) + x = fluid.layers.reshape(x, [-1, gf_dim * 2, img_dim // 4, img_dim // 4]) x = deconv(x, gf_dim * 2, act='relu', output_size=[14, 14]) x = deconv(x, 1, filter_size=5, padding=2, act='tanh', output_size=[28, 28]) x = fluid.layers.reshape(x, shape=[-1, 28 * 28]) diff --git a/fluid/gan/c_gan/utility.py b/fluid/gan/c_gan/utility.py index b9cd4711b555a9947634f5c0d205ebff8cc77b8e..3c64abc246f1ff44d0cb3953aae5dc84d2f46ec9 100644 --- a/fluid/gan/c_gan/utility.py +++ b/fluid/gan/c_gan/utility.py @@ -1,8 +1,12 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import math import distutils.util import numpy as np import inspect import matplotlib +import six matplotlib.use('agg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec @@ -54,7 +58,7 @@ def print_arguments(args): :type args: argparse.Namespace """ print("----------- Configuration Arguments -----------") - for arg, value in sorted(vars(args).iteritems()): + for arg, value in sorted(six.iteritems(vars(args))): print("%s: %s" % (arg, value)) print("------------------------------------------------") diff --git a/fluid/gan/cycle_gan/data_reader.py b/fluid/gan/cycle_gan/data_reader.py index ee57ec6c55f984eecdc5c4ac669e393c09fb6eec..4cbf81c031a72bf66d4b356533e4e14fedbcc7c2 100644 --- a/fluid/gan/cycle_gan/data_reader.py +++ b/fluid/gan/cycle_gan/data_reader.py @@ -1,7 +1,9 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import os from PIL import Image import numpy as np -from itertools import izip A_LIST_FILE = "./data/horse2zebra/trainA.txt" B_LIST_FILE = "./data/horse2zebra/trainB.txt" @@ -70,11 +72,3 @@ def b_test_reader(): Reader of images with B style for test. 
""" return reader_creater(B_TEST_LIST_FILE, cycle=False, return_name=True) - - -if __name__ == "__main__": - for A, B in izip(a_test_reader()(), a_test_reader()()): - print A[0].shape - print A[1] - print B[0].shape - print B[1] diff --git a/fluid/gan/cycle_gan/train.py b/fluid/gan/cycle_gan/train.py index ff38ed7366343ab6c605139712d20fdb0a0dbbae..b9ee2a08a3446c6a2369f4148a0a644aadad4d37 100644 --- a/fluid/gan/cycle_gan/train.py +++ b/fluid/gan/cycle_gan/train.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function import data_reader import os import random @@ -9,7 +12,6 @@ import paddle.fluid as fluid import numpy as np from paddle.fluid import core from trainer import * -from itertools import izip from scipy.misc import imsave import paddle.fluid.profiler as profiler from utility import add_arguments, print_arguments, ImagePool @@ -66,7 +68,7 @@ def train(args): if not os.path.exists(out_path): os.makedirs(out_path) i = 0 - for data_A, data_B in izip(A_test_reader(), B_test_reader()): + for data_A, data_B in zip(A_test_reader(), B_test_reader()): A_name = data_A[1] B_name = data_B[1] tensor_A = core.LoDTensor() @@ -114,7 +116,7 @@ def train(args): exe, out_path + "/d_a", main_program=d_A_trainer.program) fluid.io.save_persistables( exe, out_path + "/d_b", main_program=d_B_trainer.program) - print "saved checkpoint to [%s]" % out_path + print("saved checkpoint to {}".format(out_path)) sys.stdout.flush() def init_model(): @@ -128,7 +130,7 @@ def train(args): exe, args.init_model + "/d_a", main_program=d_A_trainer.program) fluid.io.load_persistables( exe, args.init_model + "/d_b", main_program=d_B_trainer.program) - print "Load model from [%s]" % args.init_model + print("Load model from {}".format(args.init_model)) if args.init_model: init_model() @@ -136,8 +138,8 @@ def train(args): for epoch in range(args.epoch): batch_id = 0 for i in range(max_images_num): - data_A = A_reader.next() - data_B = B_reader.next() + data_A = next(A_reader) + data_B = next(B_reader) tensor_A = core.LoDTensor() tensor_B = core.LoDTensor() tensor_A.set(data_A, place) @@ -174,9 +176,9 @@ def train(args): feed={"input_A": tensor_A, "fake_pool_A": fake_pool_A}) - print "epoch[%d]; batch[%d]; g_A_loss: %s; d_B_loss: %s; g_B_loss: %s; d_A_loss: %s;" % ( + print("epoch{}; batch{}; g_A_loss: {}; d_B_loss: {}; g_B_loss: {}; d_A_loss: {};".format( epoch, batch_id, g_A_loss[0], d_B_loss[0], g_B_loss[0], - d_A_loss[0]) + d_A_loss[0])) sys.stdout.flush() batch_id += 1 diff --git a/fluid/gan/cycle_gan/trainer.py b/fluid/gan/cycle_gan/trainer.py index d1e30dfe82f43a47f60a34dd9b10eac46758e90d..84d4c87a53d1b1d3f9b1ef794f373f50dd6da175 100644 --- a/fluid/gan/cycle_gan/trainer.py +++ b/fluid/gan/cycle_gan/trainer.py @@ -1,3 +1,6 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function from model import * import paddle.fluid as fluid diff --git a/fluid/gan/cycle_gan/utility.py b/fluid/gan/cycle_gan/utility.py index ca5ceef0b5e19e159de1440e6332784782135727..c7a21852ef8dc65b84bf6aa5b253445ee76cf44e 100644 --- a/fluid/gan/cycle_gan/utility.py +++ b/fluid/gan/cycle_gan/utility.py @@ -17,6 +17,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function import distutils.util +import six import random import glob import numpy as np @@ -39,7 +40,7 @@ def print_arguments(args): :type args: argparse.Namespace """ print("----------- Configuration Arguments 
-----------") - for arg, value in sorted(vars(args).iteritems()): + for arg, value in sorted(six.iteritems(vars(args))): print("%s: %s" % (arg, value)) print("------------------------------------------------") diff --git a/fluid/icnet/infer.py b/fluid/icnet/infer.py index f93469f157660a4c5adae7d4ff2bc9b315bce41e..9f556fc00d5fe0346cb063d90a770ee7c9ab32b5 100644 --- a/fluid/icnet/infer.py +++ b/fluid/icnet/infer.py @@ -8,7 +8,7 @@ import os import cv2 import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle from icnet import icnet from utils import add_arguments, print_arguments, get_feeder_data from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter @@ -111,10 +111,10 @@ def infer(args): for line in open(args.images_list): image_file = args.images_path + "/" + line.strip() filename = os.path.basename(image_file) - image = paddle.image.load_image( + image = paddle.dataset.image.load_image( image_file, is_color=True).astype("float32") image -= IMG_MEAN - img = paddle.image.to_chw(image)[np.newaxis, :] + img = paddle.dataset.image.to_chw(image)[np.newaxis, :] image_t = fluid.core.LoDTensor() image_t.set(img, place) result = exe.run(inference_program, diff --git a/fluid/image_classification/README_cn.md b/fluid/image_classification/README_cn.md index 937dd148c70ccb86a24c9ad7fd1705de3d5b4678..12b4d3ef3d22aa4b78abc6138adac1445ae18e42 100644 --- a/fluid/image_classification/README_cn.md +++ b/fluid/image_classification/README_cn.md @@ -14,7 +14,7 @@ ## 安装 -在当前目录下运行样例代码需要PadddlePaddle Fluid的v0.13.0或以上的版本。如果你的运行环境中的PaddlePaddle低于此版本,请根据[安装文档](http://www.paddlepaddle.org/docs/develop/documentation/zh/build_and_install/pip_install_cn.html)中的说明来更新PaddlePaddle。 +在当前目录下运行样例代码需要PadddlePaddle Fluid的v0.13.0或以上的版本。如果你的运行环境中的PaddlePaddle低于此版本,请根据安装文档中的说明来更新PaddlePaddle。 ## 数据准备 diff --git a/fluid/image_classification/caffe2fluid/examples/mnist/evaluate.py b/fluid/image_classification/caffe2fluid/examples/mnist/evaluate.py index 946fa943726b39c4e8e8dfce9f41c87a06ee1912..55b053e85b9f02e218511fed477757ffb3feee23 100644 --- a/fluid/image_classification/caffe2fluid/examples/mnist/evaluate.py +++ b/fluid/image_classification/caffe2fluid/examples/mnist/evaluate.py @@ -8,7 +8,7 @@ import sys import os import numpy as np import paddle.fluid as fluid -import paddle.v2 as paddle +import paddle def test_model(exe, test_program, fetch_list, test_reader, feeder): diff --git a/fluid/image_classification/reader.py b/fluid/image_classification/reader.py index 50be1cdef5ff3ad612d4d447a87174a767867a02..639b0b01200e3d81c57d75e560d6911f3e74b710 100644 --- a/fluid/image_classification/reader.py +++ b/fluid/image_classification/reader.py @@ -140,7 +140,7 @@ def _reader_creator(file_list, # distributed mode if the env var `PADDLE_TRAINING_ROLE` exits trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) trainer_count = int(os.getenv("PADDLE_TRAINERS", "1")) - per_node_lines = len(full_lines) / trainer_count + per_node_lines = len(full_lines) // trainer_count lines = full_lines[trainer_id * per_node_lines:(trainer_id + 1) * per_node_lines] print( diff --git a/fluid/machine_reading_comprehesion/README.md b/fluid/machine_reading_comprehesion/README.md new file mode 100644 index 0000000000000000000000000000000000000000..b46d54cf41df66fc26e0f1c597e5cfb7b32e11cd --- /dev/null +++ b/fluid/machine_reading_comprehesion/README.md @@ -0,0 +1,69 @@ +# Abstract +Dureader is an end-to-end neural network model for machine reading comprehesion style question answering, which aims to anser questions from given 
passages. We first match the question and passage with a bidirectional attention flow network to obtain the question-aware passage representation. Then we employ a pointer network to locate the positions of answers from the passages. Our experimental evaluations show that the DuReader model achieves state-of-the-art results on the DuReader dataset. +# Dataset +DuReader Dataset is a new large-scale real-world and human-sourced MRC dataset in Chinese. DuReader focuses on real-world open-domain question answering. The advantages of DuReader over existing datasets are summarized as follows: + - Real question + - Real article + - Real answer + - Real application scenario + - Rich annotation + +# Network +DuReader is inspired by three classic reading comprehension models ([BiDAF](https://arxiv.org/abs/1611.01603), [Match-LSTM](https://arxiv.org/abs/1608.07905), [R-NET](https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf)). + +The DuReader model is a hierarchical multi-stage process and consists of five layers: + +- **Word Embedding Layer** maps each word to a vector using a pre-trained word embedding model. +- **Encoding Layer** extracts context information for each position in the question and passages with a bi-directional LSTM network. +- **Attention Flow Layer** couples the query and context vectors and produces a set of query-aware feature vectors for each word in the context. Please refer to [BiDAF](https://arxiv.org/abs/1611.01603) for more details. +- **Fusion Layer** employs a layer of bi-directional LSTM to capture the interaction among context words independent of the query. +- **Decode Layer** employs an answer pointer network with attention pooling of the question to locate the positions of answers from the passages. Please refer to [Match-LSTM](https://arxiv.org/abs/1608.07905) and [R-NET](https://www.microsoft.com/en-us/research/wp-content/uploads/2017/05/r-net.pdf) for more details. + +## How to Run +### Download the Dataset +To download the DuReader dataset: +``` +cd data && bash download.sh +``` +For more details about the DuReader dataset, please refer to the [DuReader Dataset Homepage](https://ai.baidu.com//broad/subordinate?dataset=dureader). + +### Download Third-party Dependencies +We use Bleu and Rouge as evaluation metrics. The calculation of these metrics relies on the scoring scripts under [coco-caption](https://github.com/tylin/coco-caption); to download them, run: + +``` +cd utils && bash download_thirdparty.sh +``` +### Environment Requirements +For now we have only tested on PaddlePaddle v1.0. To install PaddlePaddle and for more details about it, see the [PaddlePaddle Homepage](http://paddlepaddle.org). + +### Preparation +Before training the model, we have to make sure that the data is ready. For preparation, we will check the data files, make directories and extract a vocabulary for later use. You can run the following command to do this: + +``` +sh run.sh --prepare +``` +You can specify the files for train/dev/test by setting the `trainset`/`devset`/`testset` options. +### Training +To train the model, you can set hyper-parameters such as the learning rate by using `--learning_rate NUM`. For example, to train the model for 10 passes, you can run: + +``` +sh run.sh --train --pass_num 10 +``` + +The training process includes an evaluation on the dev set after each training epoch. By default, the model with the best Bleu-4 score on the dev set will be saved.
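+
+At prediction time, the pointer network emits a start-position and an end-position probability for every token of the concatenated passages, and the answer is the span maximizing `start_prob * end_prob` under the `--max_a_len` limit (default 200). The snippet below is only a minimal sketch of that selection step, mirroring `find_best_answer_for_passage` in `run.py`; the name `best_span` is illustrative, not part of this repo's API:
+
+```
+def best_span(start_probs, end_probs, max_a_len=200):
+    # Scan all (start, end) pairs with end >= start and span length <= max_a_len,
+    # keeping the pair with the highest start_probs[s] * end_probs[e].
+    best, best_prob = (0, 0), 0.0
+    for s in range(len(start_probs)):
+        for e in range(s, min(s + max_a_len, len(end_probs))):
+            p = start_probs[s] * end_probs[e]
+            if p > best_prob:
+                best, best_prob = (s, e), p
+    return best, best_prob
+```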
+ +### Evaluation +To conduct a single evaluation on the dev set with the trained model, you can run the following command: + +``` +sh run.sh --evaluate --load_dir models/1 +``` + +### Prediction +You can also predict answers for the samples in the specified test files using the following command: + +``` +sh run.sh --predict --load_dir models/1 --testset ../data/demo/devset/search.dev.json +``` + +By default, the results are saved in the `../data/results/` folder. You can change this by specifying `--result_dir DIR_PATH`. diff --git a/fluid/machine_reading_comprehesion/args.py b/fluid/machine_reading_comprehesion/args.py new file mode 100644 index 0000000000000000000000000000000000000000..228375584eec4d9602bb77a853cfd61c4016e909 --- /dev/null +++ b/fluid/machine_reading_comprehesion/args.py @@ -0,0 +1,119 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import distutils.util + + +def parse_args(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + '--prepare', + action='store_true', + help='create the directories, prepare the vocabulary and embeddings') + parser.add_argument('--train', action='store_true', help='train the model') + parser.add_argument( + '--evaluate', action='store_true', help='evaluate the model on dev set') + parser.add_argument( + '--predict', + action='store_true', + help='predict the answers for test set with trained model') + parser.add_argument( + "--embed_size", + type=int, + default=300, + help="The dimension of embedding table. (default: %(default)d)") + parser.add_argument( + "--hidden_size", + type=int, + default=300, + help="The size of rnn hidden unit. (default: %(default)d)") + parser.add_argument( + "--batch_size", + type=int, + default=32, + help="The number of sequences in a mini-batch. (default: %(default)d)") + parser.add_argument( + "--pass_num", + type=int, + default=5, + help="The pass number to train. (default: %(default)d)") + parser.add_argument( + "--learning_rate", + type=float, + default=0.001, + help="Learning rate used to train the model. (default: %(default)f)") + parser.add_argument( + "--use_gpu", + type=distutils.util.strtobool, + default=True, + help="Whether to use gpu. (default: %(default)d)") + parser.add_argument( + "--save_dir", + type=str, + default="model", + help="Specify the path to save trained models.") + parser.add_argument( + "--load_dir", + type=str, + default="", + help="Specify the path to load trained models.") + parser.add_argument( + "--save_interval", + type=int, + default=1, + help="Save the trained model every n passes." + "(default: %(default)d)") + parser.add_argument( + "--log_interval", + type=int, + default=50, + help="log the train loss every n batches." + "(default: %(default)d)") + parser.add_argument( + "--dev_interval", + type=int, + default=1000, + help="compute dev loss every n batches."
+ "(default: %(default)d)") + parser.add_argument('--optim', default='adam', help='optimizer type') + parser.add_argument('--trainset', nargs='+', help='train dataset') + parser.add_argument('--devset', nargs='+', help='dev dataset') + parser.add_argument('--testset', nargs='+', help='test dataset') + parser.add_argument('--vocab_dir', help='dict') + parser.add_argument('--max_p_num', type=int, default=5) + parser.add_argument('--max_a_len', type=int, default=200) + parser.add_argument('--max_p_len', type=int, default=500) + parser.add_argument('--max_q_len', type=int, default=9) + parser.add_argument('--doc_num', type=int, default=5) + parser.add_argument('--para_print', action='store_true') + parser.add_argument('--drop_rate', type=float, default=0.0) + parser.add_argument('--random_seed', type=int, default=123) + parser.add_argument( + '--log_path', + help='path of the log file. If not set, logs are printed to console') + parser.add_argument( + '--result_dir', + default='../data/results/', + help='the dir to output the results') + parser.add_argument( + '--result_name', + default='test_result', + help='the file name of the results') + args = parser.parse_args() + return args diff --git a/fluid/machine_reading_comprehesion/data/download.sh b/fluid/machine_reading_comprehesion/data/download.sh new file mode 100644 index 0000000000000000000000000000000000000000..41f79dd0cb492d95691a2240e807ed613fd11c8d --- /dev/null +++ b/fluid/machine_reading_comprehesion/data/download.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# ============================================================================== +# Copyright 2017 Baidu.com, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +if [[ -d preprocessed ]] && [[ -d raw ]]; then + echo "data exist" + exit 0 +else + wget -c --no-check-certificate http://dureader.gz.bcebos.com/dureader_preprocessed.zip +fi + +if md5sum --status -c md5sum.txt; then + unzip dureader_preprocessed.zip +else + echo "download data error!" >> /dev/stderr + exit 1 +fi diff --git a/fluid/machine_reading_comprehesion/data/md5sum.txt b/fluid/machine_reading_comprehesion/data/md5sum.txt new file mode 100644 index 0000000000000000000000000000000000000000..d6bce75a937995de3c29d2b7e029a13e82731e04 --- /dev/null +++ b/fluid/machine_reading_comprehesion/data/md5sum.txt @@ -0,0 +1 @@ +7a4c28026f7dc94e8135d17203c63664 dureader_preprocessed.zip diff --git a/fluid/machine_reading_comprehesion/dataset.py b/fluid/machine_reading_comprehesion/dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..7a5cea1860745e63e15727bb0cc45733b4e2c8fa --- /dev/null +++ b/fluid/machine_reading_comprehesion/dataset.py @@ -0,0 +1,259 @@ +# -*- coding:utf8 -*- +# ============================================================================== +# Copyright 2017 Baidu.com, Inc. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +This module implements data process strategies. +""" + +import os +import json +import logging +import numpy as np +from collections import Counter + + +class BRCDataset(object): + """ + This module implements the APIs for loading and using baidu reading comprehension dataset + """ + + def __init__(self, + max_p_num, + max_p_len, + max_q_len, + train_files=[], + dev_files=[], + test_files=[]): + self.logger = logging.getLogger("brc") + self.max_p_num = max_p_num + self.max_p_len = max_p_len + self.max_q_len = max_q_len + + self.train_set, self.dev_set, self.test_set = [], [], [] + if train_files: + for train_file in train_files: + self.train_set += self._load_dataset(train_file, train=True) + self.logger.info('Train set size: {} questions.'.format( + len(self.train_set))) + + if dev_files: + for dev_file in dev_files: + self.dev_set += self._load_dataset(dev_file) + self.logger.info('Dev set size: {} questions.'.format( + len(self.dev_set))) + + if test_files: + for test_file in test_files: + self.test_set += self._load_dataset(test_file) + self.logger.info('Test set size: {} questions.'.format( + len(self.test_set))) + + def _load_dataset(self, data_path, train=False): + """ + Loads the dataset + Args: + data_path: the data file to load + """ + with open(data_path) as fin: + data_set = [] + for lidx, line in enumerate(fin): + sample = json.loads(line.strip()) + if train: + if len(sample['answer_spans']) == 0: + continue + if sample['answer_spans'][0][1] >= self.max_p_len: + continue + + if 'answer_docs' in sample: + sample['answer_passages'] = sample['answer_docs'] + + sample['question_tokens'] = sample['segmented_question'] + + sample['passages'] = [] + for d_idx, doc in enumerate(sample['documents']): + if train: + most_related_para = doc['most_related_para'] + sample['passages'].append({ + 'passage_tokens': + doc['segmented_paragraphs'][most_related_para], + 'is_selected': doc['is_selected'] + }) + else: + para_infos = [] + for para_tokens in doc['segmented_paragraphs']: + question_tokens = sample['segmented_question'] + common_with_question = Counter( + para_tokens) & Counter(question_tokens) + correct_preds = sum(common_with_question.values()) + if correct_preds == 0: + recall_wrt_question = 0 + else: + recall_wrt_question = float( + correct_preds) / len(question_tokens) + para_infos.append((para_tokens, recall_wrt_question, + len(para_tokens))) + para_infos.sort(key=lambda x: (-x[1], x[2])) + fake_passage_tokens = [] + for para_info in para_infos[:1]: + fake_passage_tokens += para_info[0] + sample['passages'].append({ + 'passage_tokens': fake_passage_tokens + }) + data_set.append(sample) + return data_set + + def _one_mini_batch(self, data, indices, pad_id): + """ + Get one mini batch + Args: + data: all data + indices: the indices of the samples to be selected + pad_id: + Returns: + one batch of data + """ + batch_data = { + 
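+            # Filled below with one flat entry per (sample, passage) slot;
+            # sequences are later truncated/padded by _dynamic_padding.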
'raw_data': [data[i] for i in indices], + 'question_token_ids': [], + 'question_length': [], + 'passage_token_ids': [], + 'passage_length': [], + 'start_id': [], + 'end_id': [] + } + max_passage_num = max( + [len(sample['passages']) for sample in batch_data['raw_data']]) + #max_passage_num = min(self.max_p_num, max_passage_num) + max_passage_num = self.max_p_num + for sidx, sample in enumerate(batch_data['raw_data']): + for pidx in range(max_passage_num): + if pidx < len(sample['passages']): + batch_data['question_token_ids'].append(sample[ + 'question_token_ids']) + batch_data['question_length'].append( + len(sample['question_token_ids'])) + passage_token_ids = sample['passages'][pidx][ + 'passage_token_ids'] + batch_data['passage_token_ids'].append(passage_token_ids) + batch_data['passage_length'].append( + min(len(passage_token_ids), self.max_p_len)) + else: + batch_data['question_token_ids'].append([]) + batch_data['question_length'].append(0) + batch_data['passage_token_ids'].append([]) + batch_data['passage_length'].append(0) + batch_data, padded_p_len, padded_q_len = self._dynamic_padding( + batch_data, pad_id) + for sample in batch_data['raw_data']: + if 'answer_passages' in sample and len(sample['answer_passages']): + gold_passage_offset = padded_p_len * sample['answer_passages'][ + 0] + batch_data['start_id'].append(gold_passage_offset + sample[ + 'answer_spans'][0][0]) + batch_data['end_id'].append(gold_passage_offset + sample[ + 'answer_spans'][0][1]) + else: + # fake span for some samples, only valid for testing + batch_data['start_id'].append(0) + batch_data['end_id'].append(0) + return batch_data + + def _dynamic_padding(self, batch_data, pad_id): + """ + Dynamically pads the batch_data with pad_id + """ + pad_p_len = min(self.max_p_len, max(batch_data['passage_length'])) + pad_q_len = min(self.max_q_len, max(batch_data['question_length'])) + batch_data['passage_token_ids'] = [ + (ids + [pad_id] * (pad_p_len - len(ids)))[:pad_p_len] + for ids in batch_data['passage_token_ids'] + ] + batch_data['question_token_ids'] = [ + (ids + [pad_id] * (pad_q_len - len(ids)))[:pad_q_len] + for ids in batch_data['question_token_ids'] + ] + return batch_data, pad_p_len, pad_q_len + + def word_iter(self, set_name=None): + """ + Iterates over all the words in the dataset + Args: + set_name: if it is set, then the specific set will be used + Returns: + a generator + """ + if set_name is None: + data_set = self.train_set + self.dev_set + self.test_set + elif set_name == 'train': + data_set = self.train_set + elif set_name == 'dev': + data_set = self.dev_set + elif set_name == 'test': + data_set = self.test_set + else: + raise NotImplementedError('No data set named as {}'.format( + set_name)) + if data_set is not None: + for sample in data_set: + for token in sample['question_tokens']: + yield token + for passage in sample['passages']: + for token in passage['passage_tokens']: + yield token + + def convert_to_ids(self, vocab): + """ + Convert the question and passage in the original dataset to ids + Args: + vocab: the vocabulary on this dataset + """ + for data_set in [self.train_set, self.dev_set, self.test_set]: + if data_set is None: + continue + for sample in data_set: + sample['question_token_ids'] = vocab.convert_to_ids(sample[ + 'question_tokens']) + for passage in sample['passages']: + passage['passage_token_ids'] = vocab.convert_to_ids(passage[ + 'passage_tokens']) + + def gen_mini_batches(self, set_name, batch_size, pad_id, shuffle=True): + """ + Generate data batches for a specific 
dataset (train/dev/test) + Args: + set_name: train/dev/test to indicate the set + batch_size: number of samples in one batch + pad_id: pad id + shuffle: if set to be true, the data is shuffled. + Returns: + a generator for all batches + """ + if set_name == 'train': + data = self.train_set + elif set_name == 'dev': + data = self.dev_set + elif set_name == 'test': + data = self.test_set + else: + raise NotImplementedError('No data set named as {}'.format( + set_name)) + data_size = len(data) + indices = np.arange(data_size) + if shuffle: + np.random.shuffle(indices) + for batch_start in np.arange(0, data_size, batch_size): + batch_indices = indices[batch_start:batch_start + batch_size] + yield self._one_mini_batch(data, batch_indices, pad_id) diff --git a/fluid/machine_reading_comprehesion/rc_model.py b/fluid/machine_reading_comprehesion/rc_model.py new file mode 100644 index 0000000000000000000000000000000000000000..11d5b5d91d734a82d687a09b587ad614d0f03fff --- /dev/null +++ b/fluid/machine_reading_comprehesion/rc_model.py @@ -0,0 +1,312 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import paddle.fluid.layers as layers +import paddle.fluid as fluid +import numpy as np + + +def dropout(input, args): + if args.drop_rate: + return layers.dropout( + input, + dropout_prob=args.drop_rate, + seed=args.random_seed, + is_test=False) + else: + return input + + +def bi_lstm_encoder(input_seq, gate_size, para_name, args): + # A bi-directional lstm encoder implementation. + # Linear transformation part for input gate, output gate, forget gate + # and cell activation vectors need be done outside of dynamic_lstm. + # So the output size is 4 times of gate_size. 
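+    # Each direction gets its own input projection (fc) feeding a dynamic_lstm
+    # op; the backward direction sets is_reverse=True, and the per-timestep
+    # hidden states of the two directions are concatenated on the feature axis.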
+ + input_forward_proj = layers.fc( + input=input_seq, + param_attr=fluid.ParamAttr(name=para_name + '_fw_gate_w'), + size=gate_size * 4, + act=None, + bias_attr=False) + input_reversed_proj = layers.fc( + input=input_seq, + param_attr=fluid.ParamAttr(name=para_name + '_bw_gate_w'), + size=gate_size * 4, + act=None, + bias_attr=False) + forward, _ = layers.dynamic_lstm( + input=input_forward_proj, + size=gate_size * 4, + use_peepholes=False, + param_attr=fluid.ParamAttr(name=para_name + '_fw_lstm_w'), + bias_attr=fluid.ParamAttr(name=para_name + '_fw_lstm_b')) + reversed, _ = layers.dynamic_lstm( + input=input_reversed_proj, + param_attr=fluid.ParamAttr(name=para_name + '_bw_lstm_w'), + bias_attr=fluid.ParamAttr(name=para_name + '_bw_lstm_b'), + size=gate_size * 4, + is_reverse=True, + use_peepholes=False) + + encoder_out = layers.concat(input=[forward, reversed], axis=1) + return encoder_out + + +def encoder(input_name, para_name, shape, hidden_size, args): + input_ids = layers.data( + name=input_name, shape=[1], dtype='int64', lod_level=1) + input_embedding = layers.embedding( + input=input_ids, + size=shape, + dtype='float32', + is_sparse=True, + param_attr=fluid.ParamAttr(name='embedding_para')) + + encoder_out = bi_lstm_encoder( + input_seq=input_embedding, + gate_size=hidden_size, + para_name=para_name, + args=args) + return dropout(encoder_out, args) + + +def attn_flow(q_enc, p_enc, p_ids_name, args): + tag = p_ids_name + "::" + drnn = layers.DynamicRNN() + with drnn.block(): + h_cur = drnn.step_input(p_enc) + u_all = drnn.static_input(q_enc) + h_expd = layers.sequence_expand(x=h_cur, y=u_all) + s_t_mul = layers.elementwise_mul(x=u_all, y=h_expd, axis=0) + s_t_sum = layers.reduce_sum(input=s_t_mul, dim=1, keep_dim=True) + s_t_re = layers.reshape(s_t_sum, shape=[-1, 0]) + s_t = layers.sequence_softmax(input=s_t_re) + u_expr = layers.elementwise_mul(x=u_all, y=s_t, axis=0) + u_expr = layers.sequence_pool(input=u_expr, pool_type='sum') + + b_t = layers.sequence_pool(input=s_t_sum, pool_type='max') + drnn.output(u_expr, b_t) + U_expr, b = drnn() + b_norm = layers.sequence_softmax(input=b) + h_expr = layers.elementwise_mul(x=p_enc, y=b_norm, axis=0) + h_expr = layers.sequence_pool(input=h_expr, pool_type='sum') + + H_expr = layers.sequence_expand(x=h_expr, y=p_enc) + H_expr = layers.lod_reset(x=H_expr, y=p_enc) + h_u = layers.elementwise_mul(x=p_enc, y=U_expr, axis=0) + h_h = layers.elementwise_mul(x=p_enc, y=H_expr, axis=0) + + g = layers.concat(input=[p_enc, U_expr, h_u, h_h], axis=1) + return dropout(g, args) + + +def lstm_step(x_t, hidden_t_prev, cell_t_prev, size, para_name, args): + def linear(inputs, para_name, args): + return layers.fc(input=inputs, + size=size, + param_attr=fluid.ParamAttr(name=para_name + '_w'), + bias_attr=fluid.ParamAttr(name=para_name + '_b')) + + input_cat = layers.concat([hidden_t_prev, x_t], axis=1) + forget_gate = layers.sigmoid(x=linear(input_cat, para_name + '_lstm_f', + args)) + input_gate = layers.sigmoid(x=linear(input_cat, para_name + '_lstm_i', + args)) + output_gate = layers.sigmoid(x=linear(input_cat, para_name + '_lstm_o', + args)) + cell_tilde = layers.tanh(x=linear(input_cat, para_name + '_lstm_c', args)) + + cell_t = layers.sums(input=[ + layers.elementwise_mul( + x=forget_gate, y=cell_t_prev), layers.elementwise_mul( + x=input_gate, y=cell_tilde) + ]) + + hidden_t = layers.elementwise_mul(x=output_gate, y=layers.tanh(x=cell_t)) + + return hidden_t, cell_t + + +#point network +def point_network_decoder(p_vec, q_vec, hidden_size, args): + tag 
= 'pn_decoder:' + init_random = fluid.initializer.Normal(loc=0.0, scale=1.0) + + random_attn = layers.create_parameter( + shape=[1, hidden_size], + dtype='float32', + default_initializer=init_random) + random_attn = layers.fc( + input=random_attn, + size=hidden_size, + act=None, + param_attr=fluid.ParamAttr(name=tag + 'random_attn_fc_w'), + bias_attr=fluid.ParamAttr(name=tag + 'random_attn_fc_b')) + random_attn = layers.reshape(random_attn, shape=[-1]) + U = layers.fc(input=q_vec, + param_attr=fluid.ParamAttr(name=tag + 'q_vec_fc_w'), + bias_attr=False, + size=hidden_size, + act=None) + random_attn + U = layers.tanh(U) + + logits = layers.fc(input=U, + param_attr=fluid.ParamAttr(name=tag + 'logits_fc_w'), + bias_attr=fluid.ParamAttr(name=tag + 'logits_fc_b'), + size=1, + act=None) + scores = layers.sequence_softmax(input=logits) + pooled_vec = layers.elementwise_mul(x=q_vec, y=scores, axis=0) + pooled_vec = layers.sequence_pool(input=pooled_vec, pool_type='sum') + + init_state = layers.fc( + input=pooled_vec, + param_attr=fluid.ParamAttr(name=tag + 'init_state_fc_w'), + bias_attr=fluid.ParamAttr(name=tag + 'init_state_fc_b'), + size=hidden_size, + act=None) + + def custom_dynamic_rnn(p_vec, init_state, hidden_size, para_name, args): + tag = para_name + "custom_dynamic_rnn:" + + def static_rnn(step, + p_vec=p_vec, + init_state=None, + para_name='', + args=args): + tag = para_name + "static_rnn:" + ctx = layers.fc( + input=p_vec, + param_attr=fluid.ParamAttr(name=tag + 'context_fc_w'), + bias_attr=fluid.ParamAttr(name=tag + 'context_fc_b'), + size=hidden_size, + act=None) + + beta = [] + c_prev = init_state + m_prev = init_state + for i in range(step): + m_prev0 = layers.fc( + input=m_prev, + size=hidden_size, + act=None, + param_attr=fluid.ParamAttr(name=tag + 'm_prev0_fc_w'), + bias_attr=fluid.ParamAttr(name=tag + 'm_prev0_fc_b')) + m_prev1 = layers.sequence_expand(x=m_prev0, y=ctx) + + Fk = ctx + m_prev1 + Fk = layers.tanh(Fk) + logits = layers.fc( + input=Fk, + size=1, + act=None, + param_attr=fluid.ParamAttr(name=tag + 'logits_fc_w'), + bias_attr=fluid.ParamAttr(name=tag + 'logits_fc_b')) + + scores = layers.sequence_softmax(input=logits) + attn_ctx = layers.elementwise_mul(x=p_vec, y=scores, axis=0) + attn_ctx = layers.sequence_pool(input=attn_ctx, pool_type='sum') + + hidden_t, cell_t = lstm_step( + attn_ctx, + hidden_t_prev=m_prev, + cell_t_prev=c_prev, + size=hidden_size, + para_name=tag, + args=args) + m_prev = hidden_t + c_prev = cell_t + beta.append(scores) + return beta + + return static_rnn( + 2, p_vec=p_vec, init_state=init_state, para_name=para_name) + + fw_outputs = custom_dynamic_rnn(p_vec, init_state, hidden_size, tag + "fw:", + args) + bw_outputs = custom_dynamic_rnn(p_vec, init_state, hidden_size, tag + "bw:", + args) + + start_prob = layers.elementwise_add( + x=fw_outputs[0], y=bw_outputs[1], axis=0) / 2 + end_prob = layers.elementwise_add( + x=fw_outputs[1], y=bw_outputs[0], axis=0) / 2 + + return start_prob, end_prob + + +def fusion(g, args): + m = bi_lstm_encoder( + input_seq=g, gate_size=args.hidden_size, para_name='fusion', args=args) + return dropout(m, args) + + +def rc_model(hidden_size, vocab, args): + emb_shape = [vocab.size(), vocab.embed_dim] + # stage 1:encode + p_ids_names = [] + q_ids_names = [] + ms = [] + gs = [] + qs = [] + for i in range(args.doc_num): + p_ids_name = "pids_%d" % i + p_ids_names.append(p_ids_name) + p_enc_i = encoder(p_ids_name, 'p_enc', emb_shape, hidden_size, args) + + q_ids_name = "qids_%d" % i + q_ids_names.append(q_ids_name) + 
q_enc_i = encoder(q_ids_name, 'q_enc', emb_shape, hidden_size, args) + + # stage 2:match + g_i = attn_flow(q_enc_i, p_enc_i, p_ids_name, args) + # stage 3:fusion + m_i = fusion(g_i, args) + ms.append(m_i) + gs.append(g_i) + qs.append(q_enc_i) + m = layers.sequence_concat(input=ms) + g = layers.sequence_concat(input=gs) + q_vec = layers.sequence_concat(input=qs) + + # stage 4:decode + start_probs, end_probs = point_network_decoder( + p_vec=m, q_vec=q_vec, hidden_size=hidden_size, args=args) + + start_labels = layers.data( + name="start_lables", shape=[1], dtype='float32', lod_level=1) + end_labels = layers.data( + name="end_lables", shape=[1], dtype='float32', lod_level=1) + + cost0 = layers.sequence_pool( + layers.cross_entropy( + input=start_probs, label=start_labels, soft_label=True), + 'sum') + cost1 = layers.sequence_pool( + layers.cross_entropy( + input=end_probs, label=end_labels, soft_label=True), + 'sum') + + cost0 = layers.mean(cost0) + cost1 = layers.mean(cost1) + cost = cost0 + cost1 + cost.persistable = True + + feeding_list = q_ids_names + ["start_lables", "end_lables"] + p_ids_names + return cost, start_probs, end_probs, feeding_list diff --git a/fluid/machine_reading_comprehesion/run.py b/fluid/machine_reading_comprehesion/run.py new file mode 100644 index 0000000000000000000000000000000000000000..bae54d42856787ef2c17481281ac6d14cb074812 --- /dev/null +++ b/fluid/machine_reading_comprehesion/run.py @@ -0,0 +1,519 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
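Two notes on the model code above before the runner script. First, point_network_decoder runs custom_dynamic_rnn twice over the same passage representation, once under the "fw:" tag and once under the "bw:" tag, then averages fw_outputs[0] with bw_outputs[1] and fw_outputs[1] with bw_outputs[0]; the index swap suggests the backward decoder predicts the end position first. A toy numpy sketch of that combination (illustrative only, not part of the patch; all values invented):

import numpy as np

# Toy per-position distributions from the two decoding directions.
fw = [np.array([0.7, 0.2, 0.1]), np.array([0.1, 0.3, 0.6])]  # [start, end]
bw = [np.array([0.2, 0.2, 0.6]), np.array([0.5, 0.4, 0.1])]  # [end, start]

start_prob = (fw[0] + bw[1]) / 2  # forward start averaged with backward start
end_prob = (fw[1] + bw[0]) / 2    # forward end averaged with backward end
print(start_prob, end_prob)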
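Second, the cost built at the end of rc_model pairs sequence_softmax outputs with one-hot position labels through cross_entropy(soft_label=True) followed by a 'sum' sequence_pool, which per sequence reduces to the negative log-probability of the gold position. A minimal numpy sketch of that arithmetic (illustrative only; the function name is mine, not from the patch):

import numpy as np

def span_nll(position_probs, one_hot_label):
    # Cross entropy against a soft (here one-hot) label, summed over the
    # sequence -- mirrors cross_entropy(soft_label=True) + sequence_pool('sum').
    return float(-np.sum(one_hot_label * np.log(position_probs)))

# A 4-position passage whose answer starts at position 2.
start_probs = np.array([0.1, 0.2, 0.6, 0.1])  # output of sequence_softmax
start_label = np.array([0.0, 0.0, 1.0, 0.0])  # one-hot start label feed
print(span_nll(start_probs, start_label))      # -log(0.6) ~= 0.511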
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import time +import os +import random +import json + +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +from paddle.fluid.executor import Executor + +import sys +if sys.version[0] == '2': + reload(sys) + sys.setdefaultencoding("utf-8") +sys.path.append('..') + +from args import * +import rc_model +from dataset import BRCDataset +import logging +import pickle +from utils import normalize +from utils import compute_bleu_rouge +from vocab import Vocab + + +def prepare_batch_input(insts, args): + doc_num = args.doc_num + + batch_size = len(insts['raw_data']) + new_insts = [] + + for i in range(batch_size): + p_id = [] + q_id = [] + p_ids = [] + q_ids = [] + p_len = 0 + for j in range(i * doc_num, (i + 1) * doc_num): + p_ids.append(insts['passage_token_ids'][j]) + p_id = p_id + insts['passage_token_ids'][j] + q_ids.append(insts['question_token_ids'][j]) + q_id = q_id + insts['question_token_ids'][j] + p_len = len(p_id) + + def _get_label(idx, ref_len): + ret = [0.0] * ref_len + if idx >= 0 and idx < ref_len: + ret[idx] = 1.0 + return [[x] for x in ret] + + start_label = _get_label(insts['start_id'][i], p_len) + end_label = _get_label(insts['end_id'][i], p_len) + new_inst = q_ids + [start_label, end_label] + p_ids + new_insts.append(new_inst) + return new_insts + + +def LodTensor_Array(lod_tensor): + lod = lod_tensor.lod() + array = np.array(lod_tensor) + new_array = [] + for i in range(len(lod[0]) - 1): + new_array.append(array[lod[0][i]:lod[0][i + 1]]) + return new_array + + +def print_para(train_prog, train_exe, logger, args): + if args.para_print: + param_list = train_prog.block(0).all_parameters() + param_name_list = [p.name for p in param_list] + num_sum = 0 + for p_name in param_name_list: + p_array = np.array(train_exe.scope.find_var(p_name).get_tensor()) + param_num = np.prod(p_array.shape) + num_sum = num_sum + param_num + logger.info( + "param: {0}, mean={1} max={2} min={3} num={4} {5}".format( + p_name, + p_array.mean(), + p_array.max(), p_array.min(), p_array.shape, param_num)) + logger.info("total param num: {0}".format(num_sum)) + + +def find_best_answer_for_passage(start_probs, end_probs, passage_len, args): + """ + Finds the best answer with the maximum start_prob * end_prob from a single passage + """ + if passage_len is None: + passage_len = len(start_probs) + else: + passage_len = min(len(start_probs), passage_len) + best_start, best_end, max_prob = -1, -1, 0 + for start_idx in range(passage_len): + for ans_len in range(args.max_a_len): + end_idx = start_idx + ans_len + if end_idx >= passage_len: + continue + prob = start_probs[start_idx] * end_probs[end_idx] + if prob > max_prob: + best_start = start_idx + best_end = end_idx + max_prob = prob + return (best_start, best_end), max_prob + + +def find_best_answer(sample, start_prob, end_prob, padded_p_len, args): + """ + Finds the best answer for a sample given start_prob and end_prob for each position. 
+    This calls find_best_answer_for_passage for each passage, since a sample
+    contains multiple passages.
+    """
+    best_p_idx, best_span, best_score = None, None, 0
+    for p_idx, passage in enumerate(sample['passages']):
+        if p_idx >= args.max_p_num:
+            continue
+        passage_len = min(args.max_p_len, len(passage['passage_tokens']))
+        answer_span, score = find_best_answer_for_passage(
+            start_prob[p_idx * padded_p_len:(p_idx + 1) * padded_p_len],
+            end_prob[p_idx * padded_p_len:(p_idx + 1) * padded_p_len],
+            passage_len, args)
+        if score > best_score:
+            best_score = score
+            best_p_idx = p_idx
+            best_span = answer_span
+    if best_p_idx is None or best_span is None:
+        best_answer = ''
+    else:
+        best_answer = ''.join(sample['passages'][best_p_idx]['passage_tokens'][
+            best_span[0]:best_span[1] + 1])
+    return best_answer
+
+
+def validation(inference_program, avg_cost, s_probs, e_probs, feed_order, place,
+               vocab, brc_data, logger, args):
+    """
+    Runs the model on the dev set and computes the average loss and the
+    BLEU/Rouge scores of the predicted answers.
+    """
+    parallel_executor = fluid.ParallelExecutor(
+        main_program=inference_program,
+        use_cuda=bool(args.use_gpu),
+        loss_name=avg_cost.name)
+    print_para(inference_program, parallel_executor, logger, args)
+
+    # Use the dev set as validation each pass
+    total_loss = 0.0
+    count = 0
+    pred_answers, ref_answers = [], []
+    val_feed_list = [
+        inference_program.global_block().var(var_name)
+        for var_name in feed_order
+    ]
+    val_feeder = fluid.DataFeeder(val_feed_list, place)
+    pad_id = vocab.get_id(vocab.pad_token)
+    dev_batches = brc_data.gen_mini_batches(
+        'dev', args.batch_size, pad_id, shuffle=False)
+
+    for batch_id, batch in enumerate(dev_batches, 1):
+        feed_data = prepare_batch_input(batch, args)
+        val_fetch_outs = parallel_executor.run(
+            feed=val_feeder.feed(feed_data),
+            fetch_list=[avg_cost.name, s_probs.name, e_probs.name],
+            return_numpy=False)
+
+        total_loss += np.array(val_fetch_outs[0])[0]
+
+        start_probs = LodTensor_Array(val_fetch_outs[1])
+        end_probs = LodTensor_Array(val_fetch_outs[2])
+        count += len(batch['raw_data'])
+
+        padded_p_len = len(batch['passage_token_ids'][0])
+        for sample, start_prob, end_prob in zip(batch['raw_data'], start_probs,
+                                                end_probs):
+
+            best_answer = find_best_answer(sample, start_prob, end_prob,
+                                           padded_p_len, args)
+            pred_answers.append({
+                'question_id': sample['question_id'],
+                'question_type': sample['question_type'],
+                'answers': [best_answer],
+                'entity_answers': [[]],
+                'yesno_answers': []
+            })
+            if 'answers' in sample:
+                ref_answers.append({
+                    'question_id': sample['question_id'],
+                    'question_type': sample['question_type'],
+                    'answers': sample['answers'],
+                    'entity_answers': [[]],
+                    'yesno_answers': []
+                })
+    if args.result_dir is not None and args.result_name is not None:
+        result_file = os.path.join(args.result_dir, args.result_name + '.json')
+        with open(result_file, 'w') as fout:
+            for pred_answer in pred_answers:
+                fout.write(json.dumps(pred_answer, ensure_ascii=False) + '\n')
+        logger.info('Saving {} results to {}'.format(args.result_name,
+                                                     result_file))
+
+    ave_loss = 1.0 * total_loss / count
+
+    # compute the bleu and rouge scores if reference answers are provided
+    if len(ref_answers) > 0:
+        pred_dict, ref_dict = {}, {}
+        for pred, ref in zip(pred_answers, ref_answers):
+            question_id = ref['question_id']
+            if len(ref['answers']) > 0:
+                pred_dict[question_id] = normalize(pred['answers'])
+                ref_dict[question_id] = normalize(ref['answers'])
+        bleu_rouge = compute_bleu_rouge(pred_dict, ref_dict)
+    else:
+        bleu_rouge = None
+    return ave_loss, bleu_rouge
+
+
+def train(logger, args):
+    logger.info('Load data_set and
vocab...') + with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: + vocab = pickle.load(fin) + logger.info('vocab size is {} and embed dim is {}'.format(vocab.size( + ), vocab.embed_dim)) + brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len, + args.trainset, args.devset) + logger.info('Converting text into ids...') + brc_data.convert_to_ids(vocab) + logger.info('Initialize the model...') + + # build model + main_program = fluid.Program() + startup_prog = fluid.Program() + main_program.random_seed = args.random_seed + startup_prog.random_seed = args.random_seed + with fluid.program_guard(main_program, startup_prog): + with fluid.unique_name.guard(): + avg_cost, s_probs, e_probs, feed_order = rc_model.rc_model( + args.hidden_size, vocab, args) + # clone from default main program and use it as the validation program + inference_program = main_program.clone(for_test=True) + + # build optimizer + if args.optim == 'sgd': + optimizer = fluid.optimizer.SGD( + learning_rate=args.learning_rate) + elif args.optim == 'adam': + optimizer = fluid.optimizer.Adam( + learning_rate=args.learning_rate) + elif args.optim == 'rprop': + optimizer = fluid.optimizer.RMSPropOptimizer( + learning_rate=args.learning_rate) + else: + logger.error('Unsupported optimizer: {}'.format(args.optim)) + exit(-1) + optimizer.minimize(avg_cost) + + # initialize parameters + place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() + exe = Executor(place) + if args.load_dir: + logger.info('load from {}'.format(args.load_dir)) + fluid.io.load_persistables( + exe, args.load_dir, main_program=main_program) + else: + exe.run(startup_prog) + embedding_para = fluid.global_scope().find_var( + 'embedding_para').get_tensor() + embedding_para.set(vocab.embeddings.astype(np.float32), place) + + # prepare data + feed_list = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + logger.info('Training the model...') + parallel_executor = fluid.ParallelExecutor( + main_program=main_program, + use_cuda=bool(args.use_gpu), + loss_name=avg_cost.name) + print_para(main_program, parallel_executor, logger, args) + + for pass_id in range(1, args.pass_num + 1): + pass_start_time = time.time() + pad_id = vocab.get_id(vocab.pad_token) + train_batches = brc_data.gen_mini_batches( + 'train', args.batch_size, pad_id, shuffle=True) + log_every_n_batch, n_batch_loss = args.log_interval, 0 + total_num, total_loss = 0, 0 + for batch_id, batch in enumerate(train_batches, 1): + input_data_dict = prepare_batch_input(batch, args) + fetch_outs = parallel_executor.run( + feed=feeder.feed(input_data_dict), + fetch_list=[avg_cost.name], + return_numpy=False) + cost_train = np.array(fetch_outs[0])[0] + total_num += len(batch['raw_data']) + n_batch_loss += cost_train + total_loss += cost_train * len(batch['raw_data']) + if log_every_n_batch > 0 and batch_id % log_every_n_batch == 0: + print_para(main_program, parallel_executor, logger, + args) + logger.info( + 'Average loss from batch {} to {} is {}'.format( + batch_id - log_every_n_batch + 1, batch_id, + "%.10f" % (n_batch_loss / log_every_n_batch))) + n_batch_loss = 0 + if args.dev_interval > 0 and batch_id % args.dev_interval == 0: + eval_loss, bleu_rouge = validation( + inference_program, avg_cost, s_probs, e_probs, + feed_order, place, vocab, brc_data, logger, args) + logger.info('Dev eval loss {}'.format(eval_loss)) + logger.info('Dev eval result: {}'.format(bleu_rouge)) + pass_end_time = time.time() + + 
logger.info('Evaluating the model after epoch {}'.format( + pass_id)) + if brc_data.dev_set is not None: + eval_loss, bleu_rouge = validation( + inference_program, avg_cost, s_probs, e_probs, + feed_order, place, vocab, brc_data, logger, args) + logger.info('Dev eval loss {}'.format(eval_loss)) + logger.info('Dev eval result: {}'.format(bleu_rouge)) + else: + logger.warning( + 'No dev set is loaded for evaluation in the dataset!') + time_consumed = pass_end_time - pass_start_time + logger.info('Average train loss for epoch {} is {}'.format( + pass_id, "%.10f" % (1.0 * total_loss / total_num))) + + if pass_id % args.save_interval == 0: + model_path = os.path.join(args.save_dir, str(pass_id)) + if not os.path.isdir(model_path): + os.makedirs(model_path) + + fluid.io.save_persistables( + executor=exe, + dirname=model_path, + main_program=main_program) + + +def evaluate(logger, args): + logger.info('Load data_set and vocab...') + with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: + vocab = pickle.load(fin) + logger.info('vocab size is {} and embed dim is {}'.format(vocab.size( + ), vocab.embed_dim)) + brc_data = BRCDataset( + args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.devset) + logger.info('Converting text into ids...') + brc_data.convert_to_ids(vocab) + logger.info('Initialize the model...') + + # build model + main_program = fluid.Program() + startup_prog = fluid.Program() + main_program.random_seed = args.random_seed + startup_prog.random_seed = args.random_seed + with fluid.program_guard(main_program, startup_prog): + with fluid.unique_name.guard(): + avg_cost, s_probs, e_probs, feed_order = rc_model.rc_model( + args.hidden_size, vocab, args) + # initialize parameters + place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() + exe = Executor(place) + if args.load_dir: + logger.info('load from {}'.format(args.load_dir)) + fluid.io.load_persistables( + exe, args.load_dir, main_program=main_program) + else: + logger.error('No model file to load ...') + return + + # prepare data + feed_list = [ + main_program.global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + inference_program = main_program.clone(for_test=True) + eval_loss, bleu_rouge = validation( + inference_program, avg_cost, s_probs, e_probs, feed_order, + place, vocab, brc_data, logger, args) + logger.info('Dev eval loss {}'.format(eval_loss)) + logger.info('Dev eval result: {}'.format(bleu_rouge)) + logger.info('Predicted answers are saved to {}'.format( + os.path.join(args.result_dir))) + + +def predict(logger, args): + logger.info('Load data_set and vocab...') + with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin: + vocab = pickle.load(fin) + logger.info('vocab size is {} and embed dim is {}'.format(vocab.size( + ), vocab.embed_dim)) + brc_data = BRCDataset( + args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.testset) + logger.info('Converting text into ids...') + brc_data.convert_to_ids(vocab) + logger.info('Initialize the model...') + + # build model + main_program = fluid.Program() + startup_prog = fluid.Program() + main_program.random_seed = args.random_seed + startup_prog.random_seed = args.random_seed + with fluid.program_guard(main_program, startup_prog): + with fluid.unique_name.guard(): + avg_cost, s_probs, e_probs, feed_order = rc_model.rc_model( + args.hidden_size, vocab, args) + # initialize parameters + place = core.CUDAPlace(0) if args.use_gpu else core.CPUPlace() + exe = Executor(place) + 
if args.load_dir:
+        logger.info('load from {}'.format(args.load_dir))
+        fluid.io.load_persistables(
+            exe, args.load_dir, main_program=main_program)
+    else:
+        logger.error('No model file to load ...')
+        return
+
+    # prepare data
+    feed_list = [
+        main_program.global_block().var(var_name)
+        for var_name in feed_order
+    ]
+    feeder = fluid.DataFeeder(feed_list, place)
+
+    inference_program = main_program.clone(for_test=True)
+    eval_loss, bleu_rouge = validation(
+        inference_program, avg_cost, s_probs, e_probs, feed_order,
+        place, vocab, brc_data, logger, args)
+
+
+def prepare(logger, args):
+    """
+    Checks the data files, creates the directories, and prepares the
+    vocabulary and embeddings.
+    """
+    logger.info('Checking the data files...')
+    for data_path in args.trainset + args.devset + args.testset:
+        assert os.path.exists(data_path), '{} file does not exist.'.format(
+            data_path)
+    logger.info('Preparing the directories...')
+    for dir_path in [args.vocab_dir, args.save_dir, args.result_dir]:
+        if not os.path.exists(dir_path):
+            os.makedirs(dir_path)
+
+    logger.info('Building vocabulary...')
+    brc_data = BRCDataset(args.max_p_num, args.max_p_len, args.max_q_len,
+                          args.trainset, args.devset, args.testset)
+    vocab = Vocab(lower=True)
+    for word in brc_data.word_iter('train'):
+        vocab.add(word)
+
+    unfiltered_vocab_size = vocab.size()
+    vocab.filter_tokens_by_cnt(min_cnt=2)
+    filtered_num = unfiltered_vocab_size - vocab.size()
+    logger.info('After filtering {} tokens, the final vocab size is {}'.format(
+        filtered_num, vocab.size()))
+
+    logger.info('Assigning embeddings...')
+    vocab.randomly_init_embeddings(args.embed_size)
+
+    logger.info('Saving vocab...')
+    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'wb') as fout:
+        pickle.dump(vocab, fout)
+
+    logger.info('Done with preparing!')
+
+
+if __name__ == '__main__':
+    args = parse_args()
+
+    random.seed(args.random_seed)
+    np.random.seed(args.random_seed)
+
+    logger = logging.getLogger("brc")
+    logger.setLevel(logging.INFO)
+    formatter = logging.Formatter(
+        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    if args.log_path:
+        file_handler = logging.FileHandler(args.log_path)
+        file_handler.setLevel(logging.INFO)
+        file_handler.setFormatter(formatter)
+        logger.addHandler(file_handler)
+    else:
+        console_handler = logging.StreamHandler()
+        console_handler.setLevel(logging.INFO)
+        console_handler.setFormatter(formatter)
+        logger.addHandler(console_handler)
+    logger.info('Running with args : {}'.format(args))
+    if args.prepare:
+        prepare(logger, args)
+    if args.train:
+        train(logger, args)
+    if args.evaluate:
+        evaluate(logger, args)
+    if args.predict:
+        predict(logger, args)
diff --git a/fluid/machine_reading_comprehesion/run.sh b/fluid/machine_reading_comprehesion/run.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4bcab2beecba4a7951f09ee500f40fa947738365
--- /dev/null
+++ b/fluid/machine_reading_comprehesion/run.sh
@@ -0,0 +1,21 @@
+export CUDA_VISIBLE_DEVICES=1
+python run.py \
+--trainset 'data/preprocessed/trainset/search.train.json' \
+           'data/preprocessed/trainset/zhidao.train.json' \
+--devset 'data/preprocessed/devset/search.dev.json' \
+         'data/preprocessed/devset/zhidao.dev.json' \
+--testset 'data/preprocessed/testset/search.test.json' \
+          'data/preprocessed/testset/zhidao.test.json' \
+--vocab_dir 'data/vocab' \
+--use_gpu true \
+--save_dir ./models \
+--pass_num 10 \
+--learning_rate 0.001 \
+--batch_size 8 \
+--embed_size 300 \
+--hidden_size 150 \
+--max_p_num 5 \
+--max_p_len 500 \
+--max_q_len 60 \
+--max_a_len 200 \
+--drop_rate 0.2 $@
diff --git a/fluid/machine_reading_comprehesion/utils/__init__.py b/fluid/machine_reading_comprehesion/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d840fd2698d2bb6bccd5bf82cfa51d15e938085
--- /dev/null
+++ b/fluid/machine_reading_comprehesion/utils/__init__.py
@@ -0,0 +1,35 @@
+# coding:utf8
+# ==============================================================================
+# Copyright 2017 Baidu.com, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""
+This package implements some utility functions shared by PaddlePaddle
+and Tensorflow model implementations.
+
+Authors: liuyuan(liuyuan04@baidu.com)
+Date: 2017/10/06 18:23:06
+"""
+
+from .dureader_eval import compute_bleu_rouge
+from .dureader_eval import normalize
+from .preprocess import find_fake_answer
+from .preprocess import find_best_question_match
+
+__all__ = [
+    'compute_bleu_rouge',
+    'normalize',
+    'find_fake_answer',
+    'find_best_question_match',
+]
diff --git a/fluid/machine_reading_comprehesion/utils/download_thirdparty.sh b/fluid/machine_reading_comprehesion/utils/download_thirdparty.sh
new file mode 100755
index 0000000000000000000000000000000000000000..cc37da879971c2279edab220485d08d9b20c35fa
--- /dev/null
+++ b/fluid/machine_reading_comprehesion/utils/download_thirdparty.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+# ==============================================================================
+# Copyright 2017 Baidu.com, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+
+# We use Bleu and Rouge as evaluation metrics; the calculation of these metrics
+# relies on the scoring scripts under "https://github.com/tylin/coco-caption"
+
+bleu_base_url='https://raw.githubusercontent.com/tylin/coco-caption/master/pycocoevalcap/bleu'
+bleu_files=("LICENSE" "__init__.py" "bleu.py" "bleu_scorer.py")
+
+rouge_base_url="https://raw.githubusercontent.com/tylin/coco-caption/master/pycocoevalcap/rouge"
+rouge_files=("__init__.py" "rouge.py")
+
+download() {
+    local metric=$1; shift;
+    local base_url=$1; shift;
+    local fnames=($@);
+
+    mkdir -p ${metric}
+    for fname in ${fnames[@]};
+    do
+        printf "downloading: %s\n" ${base_url}/${fname}
+        wget --no-check-certificate ${base_url}/${fname} -O ${metric}/${fname}
+    done
+}
+
+# prepare rouge
+download "rouge_metric" ${rouge_base_url} ${rouge_files[@]}
+
+# prepare bleu
+download "bleu_metric" ${bleu_base_url} ${bleu_files[@]}
+
+# convert python 2.x source code to python 3.x
+2to3 -w "../utils/bleu_metric/bleu_scorer.py"
+2to3 -w "../utils/bleu_metric/bleu.py"
diff --git a/fluid/machine_reading_comprehesion/utils/dureader_eval.py b/fluid/machine_reading_comprehesion/utils/dureader_eval.py
new file mode 100644
index 0000000000000000000000000000000000000000..d60988871a63ce304fc1afbf0af7b1c1801e2161
--- /dev/null
+++ b/fluid/machine_reading_comprehesion/utils/dureader_eval.py
@@ -0,0 +1,546 @@
+# -*- coding:utf8 -*-
+# ==============================================================================
+# Copyright 2017 Baidu.com, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""
+This module computes evaluation metrics for DuReader dataset.
+"""
+
+import argparse
+import json
+import sys
+import zipfile
+
+from collections import Counter
+from .bleu_metric.bleu import Bleu
+from .rouge_metric.rouge import Rouge
+
+EMPTY = ''
+YESNO_LABELS = set(['Yes', 'No', 'Depends'])
+
+
+def normalize(s):
+    """
+    Normalize strings to space joined chars.
+
+    Args:
+        s: a list of strings.
+
+    Returns:
+        A list of normalized strings.
+    """
+    if not s:
+        return s
+    normalized = []
+    for ss in s:
+        tokens = [c for c in list(ss) if len(c.strip()) != 0]
+        normalized.append(' '.join(tokens))
+    return normalized
+
+
+def data_check(obj, task):
+    """
+    Check data.
+
+    Raises:
+        Raises AssertionError when data is not legal.
+    """
+    assert 'question_id' in obj, "Missing 'question_id' field."
+    assert 'question_type' in obj, \
+        "Missing 'question_type' field. question_id: {}".format(obj['question_id'])
+
+    assert 'yesno_answers' in obj, \
+        "Missing 'yesno_answers' field. question_id: {}".format(obj['question_id'])
+    assert isinstance(obj['yesno_answers'], list), \
+        r"""'yesno_answers' field must be a list, if the 'question_type' is not
+        'YES_NO', then this field should be an empty list.
+ question_id: {}""".format(obj['question_id']) + + assert 'entity_answers' in obj, \ + "Missing 'entity_answers' field. question_id: {}".format(obj['question_id']) + assert isinstance(obj['entity_answers'], list) \ + and len(obj['entity_answers']) > 0, \ + r"""'entity_answers' field must be a list, and has at least one element, + which can be a empty list. question_id: {}""".format(obj['question_id']) + + +def read_file(file_name, task, is_ref=False): + """ + Read predict answers or reference answers from file. + + Args: + file_name: the name of the file containing predict result or reference + result. + + Returns: + A dictionary mapping question_id to the result information. The result + information itself is also a dictionary with has four keys: + - question_type: type of the query. + - yesno_answers: A list of yesno answers corresponding to 'answers'. + - answers: A list of predicted answers. + - entity_answers: A list, each element is also a list containing the entities + tagged out from the corresponding answer string. + """ + + def _open(file_name, mode, zip_obj=None): + if zip_obj is not None: + return zip_obj.open(file_name, mode) + return open(file_name, mode) + + results = {} + keys = ['answers', 'yesno_answers', 'entity_answers', 'question_type'] + if is_ref: + keys += ['source'] + + zf = zipfile.ZipFile(file_name, 'r') if file_name.endswith('.zip') else None + file_list = [file_name] if zf is None else zf.namelist() + + for fn in file_list: + for line in _open(fn, 'r', zip_obj=zf): + try: + obj = json.loads(line.strip()) + except ValueError: + raise ValueError("Every line of data should be legal json") + data_check(obj, task) + qid = obj['question_id'] + assert qid not in results, "Duplicate question_id: {}".format(qid) + results[qid] = {} + for k in keys: + results[qid][k] = obj[k] + return results + + +def compute_bleu_rouge(pred_dict, ref_dict, bleu_order=4): + """ + Compute bleu and rouge scores. + """ + assert set(pred_dict.keys()) == set(ref_dict.keys()), \ + "missing keys: {}".format(set(ref_dict.keys()) - set(pred_dict.keys())) + scores = {} + bleu_scores, _ = Bleu(bleu_order).compute_score(ref_dict, pred_dict) + for i, bleu_score in enumerate(bleu_scores): + scores['Bleu-%d' % (i + 1)] = bleu_score + rouge_score, _ = Rouge().compute_score(ref_dict, pred_dict) + scores['Rouge-L'] = rouge_score + return scores + + +def local_prf(pred_list, ref_list): + """ + Compute local precision recall and f1-score, + given only one prediction list and one reference list + """ + common = Counter(pred_list) & Counter(ref_list) + num_same = sum(common.values()) + if num_same == 0: + return 0, 0, 0 + p = 1.0 * num_same / len(pred_list) + r = 1.0 * num_same / len(ref_list) + f1 = (2 * p * r) / (p + r) + return p, r, f1 + + +def compute_prf(pred_dict, ref_dict): + """ + Compute precision recall and f1-score. 
+ """ + pred_question_ids = set(pred_dict.keys()) + ref_question_ids = set(ref_dict.keys()) + correct_preds, total_correct, total_preds = 0, 0, 0 + for question_id in ref_question_ids: + pred_entity_list = pred_dict.get(question_id, [[]]) + assert len(pred_entity_list) == 1, \ + 'the number of entity list for question_id {} is not 1.'.format(question_id) + pred_entity_list = pred_entity_list[0] + all_ref_entity_lists = ref_dict[question_id] + best_local_f1 = 0 + best_ref_entity_list = None + for ref_entity_list in all_ref_entity_lists: + local_f1 = local_prf(pred_entity_list, ref_entity_list)[2] + if local_f1 > best_local_f1: + best_ref_entity_list = ref_entity_list + best_local_f1 = local_f1 + if best_ref_entity_list is None: + if len(all_ref_entity_lists) > 0: + best_ref_entity_list = sorted( + all_ref_entity_lists, key=lambda x: len(x))[0] + else: + best_ref_entity_list = [] + gold_entities = set(best_ref_entity_list) + pred_entities = set(pred_entity_list) + correct_preds += len(gold_entities & pred_entities) + total_preds += len(pred_entities) + total_correct += len(gold_entities) + p = float(correct_preds) / total_preds if correct_preds > 0 else 0 + r = float(correct_preds) / total_correct if correct_preds > 0 else 0 + f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0 + return {'Precision': p, 'Recall': r, 'F1': f1} + + +def prepare_prf(pred_dict, ref_dict): + """ + Prepares data for calculation of prf scores. + """ + preds = {k: v['entity_answers'] for k, v in pred_dict.items()} + refs = {k: v['entity_answers'] for k, v in ref_dict.items()} + return preds, refs + + +def filter_dict(result_dict, key_tag): + """ + Filter a subset of the result_dict, where keys ends with 'key_tag'. + """ + filtered = {} + for k, v in result_dict.items(): + if k.endswith(key_tag): + filtered[k] = v + return filtered + + +def get_metrics(pred_result, ref_result, task, source): + """ + Computes metrics. + """ + metrics = {} + + ref_result_filtered = {} + pred_result_filtered = {} + if source == 'both': + ref_result_filtered = ref_result + pred_result_filtered = pred_result + else: + for question_id, info in ref_result.items(): + if info['source'] == source: + ref_result_filtered[question_id] = info + if question_id in pred_result: + pred_result_filtered[question_id] = pred_result[question_id] + + if task == 'main' or task == 'all' \ + or task == 'description': + pred_dict, ref_dict = prepare_bleu(pred_result_filtered, + ref_result_filtered, task) + metrics = compute_bleu_rouge(pred_dict, ref_dict) + elif task == 'yesno': + pred_dict, ref_dict = prepare_bleu(pred_result_filtered, + ref_result_filtered, task) + keys = ['Yes', 'No', 'Depends'] + preds = [filter_dict(pred_dict, k) for k in keys] + refs = [filter_dict(ref_dict, k) for k in keys] + + metrics = compute_bleu_rouge(pred_dict, ref_dict) + + for k, pred, ref in zip(keys, preds, refs): + m = compute_bleu_rouge(pred, ref) + k_metric = [(k + '|' + key, v) for key, v in m.items()] + metrics.update(k_metric) + + elif task == 'entity': + pred_dict, ref_dict = prepare_prf(pred_result_filtered, + ref_result_filtered) + pred_dict_bleu, ref_dict_bleu = prepare_bleu(pred_result_filtered, + ref_result_filtered, task) + metrics = compute_prf(pred_dict, ref_dict) + metrics.update(compute_bleu_rouge(pred_dict_bleu, ref_dict_bleu)) + else: + raise ValueError("Illegal task name: {}".format(task)) + + return metrics + + +def prepare_bleu(pred_result, ref_result, task): + """ + Prepares data for calculation of bleu and rouge scores. 
+ """ + pred_list, ref_list = [], [] + qids = ref_result.keys() + for qid in qids: + if task == 'main': + pred, ref = get_main_result(qid, pred_result, ref_result) + elif task == 'yesno': + pred, ref = get_yesno_result(qid, pred_result, ref_result) + elif task == 'all': + pred, ref = get_all_result(qid, pred_result, ref_result) + elif task == 'entity': + pred, ref = get_entity_result(qid, pred_result, ref_result) + elif task == 'description': + pred, ref = get_desc_result(qid, pred_result, ref_result) + else: + raise ValueError("Illegal task name: {}".format(task)) + if pred and ref: + pred_list += pred + ref_list += ref + pred_dict = dict(pred_list) + ref_dict = dict(ref_list) + for qid, ans in ref_dict.items(): + ref_dict[qid] = normalize(ref_dict[qid]) + pred_dict[qid] = normalize(pred_dict.get(qid, [EMPTY])) + if not ans or ans == [EMPTY]: + del ref_dict[qid] + del pred_dict[qid] + + for k, v in pred_dict.items(): + assert len(v) == 1, \ + "There should be only one predict answer. question_id: {}".format(k) + return pred_dict, ref_dict + + +def get_main_result(qid, pred_result, ref_result): + """ + Prepare answers for task 'main'. + + Args: + qid: question_id. + pred_result: A dict include all question_id's result information read + from args.pred_file. + ref_result: A dict incluce all question_id's result information read + from args.ref_file. + Returns: + Two lists, the first one contains predict result, the second + one contains reference result of the same question_id. Each list has + elements of tuple (question_id, answers), 'answers' is a list of strings. + """ + ref_ans = ref_result[qid]['answers'] + if not ref_ans: + ref_ans = [EMPTY] + pred_ans = pred_result.get(qid, {}).get('answers', [])[:1] + if not pred_ans: + pred_ans = [EMPTY] + + return [(qid, pred_ans)], [(qid, ref_ans)] + + +def get_entity_result(qid, pred_result, ref_result): + """ + Prepare answers for task 'entity'. + + Args: + qid: question_id. + pred_result: A dict include all question_id's result information read + from args.pred_file. + ref_result: A dict incluce all question_id's result information read + from args.ref_file. + Returns: + Two lists, the first one contains predict result, the second + one contains reference result of the same question_id. Each list has + elements of tuple (question_id, answers), 'answers' is a list of strings. + """ + if ref_result[qid]['question_type'] != 'ENTITY': + return None, None + return get_main_result(qid, pred_result, ref_result) + + +def get_desc_result(qid, pred_result, ref_result): + """ + Prepare answers for task 'description'. + + Args: + qid: question_id. + pred_result: A dict include all question_id's result information read + from args.pred_file. + ref_result: A dict incluce all question_id's result information read + from args.ref_file. + Returns: + Two lists, the first one contains predict result, the second + one contains reference result of the same question_id. Each list has + elements of tuple (question_id, answers), 'answers' is a list of strings. + """ + if ref_result[qid]['question_type'] != 'DESCRIPTION': + return None, None + return get_main_result(qid, pred_result, ref_result) + + +def get_yesno_result(qid, pred_result, ref_result): + """ + Prepare answers for task 'yesno'. + + Args: + qid: question_id. + pred_result: A dict include all question_id's result information read + from args.pred_file. + ref_result: A dict incluce all question_id's result information read + from args.ref_file. 
+ Returns: + Two lists, the first one contains predict result, the second + one contains reference result of the same question_id. Each list has + elements of tuple (question_id, answers), 'answers' is a list of strings. + """ + + def _uniq(li, is_ref): + uniq_li = [] + left = [] + keys = set() + for k, v in li: + if k not in keys: + uniq_li.append((k, v)) + keys.add(k) + else: + left.append((k, v)) + + if is_ref: + dict_li = dict(uniq_li) + for k, v in left: + dict_li[k] += v + uniq_li = [(k, v) for k, v in dict_li.items()] + return uniq_li + + def _expand_result(uniq_li): + expanded = uniq_li[:] + keys = set([x[0] for x in uniq_li]) + for k in YESNO_LABELS - keys: + expanded.append((k, [EMPTY])) + return expanded + + def _get_yesno_ans(qid, result_dict, is_ref=False): + if qid not in result_dict: + return [(str(qid) + '_' + k, v) for k, v in _expand_result([])] + yesno_answers = result_dict[qid]['yesno_answers'] + answers = result_dict[qid]['answers'] + lbl_ans = _uniq([(k, [v]) for k, v in zip(yesno_answers, answers)], + is_ref) + ret = [(str(qid) + '_' + k, v) for k, v in _expand_result(lbl_ans)] + return ret + + if ref_result[qid]['question_type'] != 'YES_NO': + return None, None + + ref_ans = _get_yesno_ans(qid, ref_result, is_ref=True) + pred_ans = _get_yesno_ans(qid, pred_result) + return pred_ans, ref_ans + + +def get_all_result(qid, pred_result, ref_result): + """ + Prepare answers for task 'all'. + + Args: + qid: question_id. + pred_result: A dict include all question_id's result information read + from args.pred_file. + ref_result: A dict incluce all question_id's result information read + from args.ref_file. + Returns: + Two lists, the first one contains predict result, the second + one contains reference result of the same question_id. Each list has + elements of tuple (question_id, answers), 'answers' is a list of strings. + """ + if ref_result[qid]['question_type'] == 'YES_NO': + return get_yesno_result(qid, pred_result, ref_result) + return get_main_result(qid, pred_result, ref_result) + + +def format_metrics(metrics, task, err_msg): + """ + Format metrics. 'err' field returns any error occured during evaluation. + + Args: + metrics: A dict object contains metrics for different tasks. + task: Task name. + err_msg: Exception raised during evaluation. + Returns: + Formatted result. 
+ """ + result = {} + sources = ["both", "search", "zhidao"] + if err_msg is not None: + return {'errorMsg': str(err_msg), 'errorCode': 1, 'data': []} + data = [] + if task != 'all' and task != 'main': + sources = ["both"] + + if task == 'entity': + metric_names = ["Bleu-4", "Rouge-L"] + metric_names_prf = ["F1", "Precision", "Recall"] + for name in metric_names + metric_names_prf: + for src in sources: + obj = { + "name": name, + "value": round(metrics[src].get(name, 0) * 100, 2), + "type": src, + } + data.append(obj) + elif task == 'yesno': + metric_names = ["Bleu-4", "Rouge-L"] + details = ["Yes", "No", "Depends"] + src = sources[0] + for name in metric_names: + obj = { + "name": name, + "value": round(metrics[src].get(name, 0) * 100, 2), + "type": 'All', + } + data.append(obj) + for d in details: + obj = { + "name": name, + "value": \ + round(metrics[src].get(d + '|' + name, 0) * 100, 2), + "type": d, + } + data.append(obj) + else: + metric_names = ["Bleu-4", "Rouge-L"] + for name in metric_names: + for src in sources: + obj = { + "name": name, + "value": \ + round(metrics[src].get(name, 0) * 100, 2), + "type": src, + } + data.append(obj) + + result["data"] = data + result["errorCode"] = 0 + result["errorMsg"] = "success" + + return result + + +def main(args): + """ + Do evaluation. + """ + err = None + metrics = {} + try: + pred_result = read_file(args.pred_file, args.task) + ref_result = read_file(args.ref_file, args.task, is_ref=True) + sources = ['both', 'search', 'zhidao'] + if args.task not in set(['main', 'all']): + sources = sources[:1] + for source in sources: + metrics[source] = get_metrics(pred_result, ref_result, args.task, + source) + except ValueError as ve: + err = ve + except AssertionError as ae: + err = ae + + print( + json.dumps( + format_metrics(metrics, args.task, err), ensure_ascii=False).encode( + 'utf8')) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('pred_file', help='predict file') + parser.add_argument('ref_file', help='reference file') + parser.add_argument( + 'task', help='task name: Main|Yes_No|All|Entity|Description') + + args = parser.parse_args() + args.task = args.task.lower().replace('_', '') + main(args) diff --git a/fluid/machine_reading_comprehesion/utils/get_vocab.py b/fluid/machine_reading_comprehesion/utils/get_vocab.py new file mode 100644 index 0000000000000000000000000000000000000000..91de46a1f3f75a64e53d2e44716312fab1bd9323 --- /dev/null +++ b/fluid/machine_reading_comprehesion/utils/get_vocab.py @@ -0,0 +1,67 @@ +# -*- coding:utf8 -*- +# ============================================================================== +# Copyright 2017 Baidu.com, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +Utility function to generate vocabulary file. 
+""" + +import argparse +import sys +import json + +from itertools import chain + + +def get_vocab(files, vocab_file): + """ + Builds vocabulary file from field 'segmented_paragraphs' + and 'segmented_question'. + + Args: + files: A list of file names. + vocab_file: The file that stores the vocabulary. + """ + vocab = {} + for f in files: + with open(f, 'r') as fin: + for line in fin: + obj = json.loads(line.strip()) + paras = [ + chain(*d['segmented_paragraphs']) for d in obj['documents'] + ] + doc_tokens = chain(*paras) + question_tokens = obj['segmented_question'] + for t in list(doc_tokens) + question_tokens: + vocab[t] = vocab.get(t, 0) + 1 + # output + sorted_vocab = sorted( + [(v, c) for v, c in vocab.items()], key=lambda x: x[1], reverse=True) + with open(vocab_file, 'w') as outf: + for w, c in sorted_vocab: + print >> outf, '{}\t{}'.format(w.encode('utf8'), c) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '--files', + nargs='+', + required=True, + help='file list to count vocab from.') + parser.add_argument( + '--vocab', required=True, help='file to store counted vocab.') + args = parser.parse_args() + get_vocab(args.files, args.vocab) diff --git a/fluid/machine_reading_comprehesion/utils/marco_tokenize_data.py b/fluid/machine_reading_comprehesion/utils/marco_tokenize_data.py new file mode 100644 index 0000000000000000000000000000000000000000..a93c2835623a746bb1d0a36fde9b2ad28dbd2497 --- /dev/null +++ b/fluid/machine_reading_comprehesion/utils/marco_tokenize_data.py @@ -0,0 +1,46 @@ +#coding=utf8 + +import os, sys, json +import nltk + + +def _nltk_tokenize(sequence): + tokens = nltk.word_tokenize(sequence) + + cur_char_offset = 0 + token_offsets = [] + token_words = [] + for token in tokens: + cur_char_offset = sequence.find(token, cur_char_offset) + token_offsets.append( + [cur_char_offset, cur_char_offset + len(token) - 1]) + token_words.append(token) + return token_offsets, token_words + + +def segment(input_js): + _, input_js['segmented_question'] = _nltk_tokenize(input_js['question']) + for doc_id, doc in enumerate(input_js['documents']): + doc['segmented_title'] = [] + doc['segmented_paragraphs'] = [] + for para_id, para in enumerate(doc['paragraphs']): + _, seg_para = _nltk_tokenize(para) + doc['segmented_paragraphs'].append(seg_para) + if 'answers' in input_js: + input_js['segmented_answers'] = [] + for answer_id, answer in enumerate(input_js['answers']): + _, seg_answer = _nltk_tokenize(answer) + input_js['segmented_answers'].append(seg_answer) + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print('Usage: tokenize_data.py ') + exit() + + nltk.download('punkt') + + for line in open(sys.argv[1]): + dureader_js = json.loads(line.strip()) + segment(dureader_js) + print(json.dumps(dureader_js)) diff --git a/fluid/machine_reading_comprehesion/utils/marcov1_to_dureader.py b/fluid/machine_reading_comprehesion/utils/marcov1_to_dureader.py new file mode 100644 index 0000000000000000000000000000000000000000..022db4dd1bdf98d2a7e0ead659e988ff109b59e9 --- /dev/null +++ b/fluid/machine_reading_comprehesion/utils/marcov1_to_dureader.py @@ -0,0 +1,37 @@ +#coding=utf8 + +import sys +import json +import pandas as pd + + +def trans(input_js): + output_js = {} + output_js['question'] = input_js['query'] + output_js['question_type'] = input_js['query_type'] + output_js['question_id'] = input_js['query_id'] + output_js['fact_or_opinion'] = "" + output_js['documents'] = [] + for para_id, para in enumerate(input_js['passages']): + doc = 
{} + doc['title'] = "" + if 'is_selected' in para: + doc['is_selected'] = True if para['is_selected'] != 0 else False + doc['paragraphs'] = [para['passage_text']] + output_js['documents'].append(doc) + + if 'answers' in input_js: + output_js['answers'] = input_js['answers'] + return output_js + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print('Usage: marcov1_to_dureader.py ') + exit() + + df = pd.read_json(sys.argv[1]) + for row in df.iterrows(): + marco_js = json.loads(row[1].to_json()) + dureader_js = trans(marco_js) + print(json.dumps(dureader_js)) diff --git a/fluid/machine_reading_comprehesion/utils/marcov2_to_v1_tojsonl.py b/fluid/machine_reading_comprehesion/utils/marcov2_to_v1_tojsonl.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb24756c64e04365c23603e86e09f107a1f7721 --- /dev/null +++ b/fluid/machine_reading_comprehesion/utils/marcov2_to_v1_tojsonl.py @@ -0,0 +1,14 @@ +import sys +import json +import pandas as pd + +if __name__ == '__main__': + if len(sys.argv) != 3: + print('Usage: tojson.py ') + exit() + infile = sys.argv[1] + outfile = sys.argv[2] + df = pd.read_json(infile) + with open(outfile, 'w') as f: + for row in df.iterrows(): + f.write(row[1].to_json() + '\n') diff --git a/fluid/machine_reading_comprehesion/utils/preprocess.py b/fluid/machine_reading_comprehesion/utils/preprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..075d26e45fc5ce116fe12cdc7b958296d24e0f17 --- /dev/null +++ b/fluid/machine_reading_comprehesion/utils/preprocess.py @@ -0,0 +1,219 @@ +############################################################################### +# ============================================================================== +# Copyright 2017 Baidu.com, Inc. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +""" +This module finds the most related paragraph of each document according to recall. 
+""" + +import sys +if sys.version[0] == '2': + reload(sys) + sys.setdefaultencoding("utf-8") +import json +from collections import Counter + + +def precision_recall_f1(prediction, ground_truth): + """ + This function calculates and returns the precision, recall and f1-score + Args: + prediction: prediction string or list to be matched + ground_truth: golden string or list reference + Returns: + floats of (p, r, f1) + Raises: + None + """ + if not isinstance(prediction, list): + prediction_tokens = prediction.split() + else: + prediction_tokens = prediction + if not isinstance(ground_truth, list): + ground_truth_tokens = ground_truth.split() + else: + ground_truth_tokens = ground_truth + common = Counter(prediction_tokens) & Counter(ground_truth_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0, 0, 0 + p = 1.0 * num_same / len(prediction_tokens) + r = 1.0 * num_same / len(ground_truth_tokens) + f1 = (2 * p * r) / (p + r) + return p, r, f1 + + +def recall(prediction, ground_truth): + """ + This function calculates and returns the recall + Args: + prediction: prediction string or list to be matched + ground_truth: golden string or list reference + Returns: + floats of recall + Raises: + None + """ + return precision_recall_f1(prediction, ground_truth)[1] + + +def f1_score(prediction, ground_truth): + """ + This function calculates and returns the f1-score + Args: + prediction: prediction string or list to be matched + ground_truth: golden string or list reference + Returns: + floats of f1 + Raises: + None + """ + return precision_recall_f1(prediction, ground_truth)[2] + + +def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): + """ + This function calculates and returns the precision, recall and f1-score + Args: + metric_fn: metric function pointer which calculates scores according to corresponding logic. + prediction: prediction string or list to be matched + ground_truth: golden string or list reference + Returns: + floats of (p, r, f1) + Raises: + None + """ + scores_for_ground_truths = [] + for ground_truth in ground_truths: + score = metric_fn(prediction, ground_truth) + scores_for_ground_truths.append(score) + return max(scores_for_ground_truths) + + +def find_best_question_match(doc, question, with_score=False): + """ + For each docment, find the paragraph that matches best to the question. + Args: + doc: The document object. + question: The question tokens. + with_score: If True then the match score will be returned, + otherwise False. + Returns: + The index of the best match paragraph, if with_score=False, + otherwise returns a tuple of the index of the best match paragraph + and the match score of that paragraph. 
+ """ + most_related_para = -1 + max_related_score = 0 + most_related_para_len = 0 + for p_idx, para_tokens in enumerate(doc['segmented_paragraphs']): + if len(question) > 0: + related_score = metric_max_over_ground_truths(recall, para_tokens, + question) + else: + related_score = 0 + + if related_score > max_related_score \ + or (related_score == max_related_score \ + and len(para_tokens) < most_related_para_len): + most_related_para = p_idx + max_related_score = related_score + most_related_para_len = len(para_tokens) + if most_related_para == -1: + most_related_para = 0 + if with_score: + return most_related_para, max_related_score + return most_related_para + + +def find_fake_answer(sample): + """ + For each document, finds the most related paragraph based on recall, + then finds a span that maximize the f1_score compared with the gold answers + and uses this span as a fake answer span + Args: + sample: a sample in the dataset + Returns: + None + Raises: + None + """ + for doc in sample['documents']: + most_related_para = -1 + most_related_para_len = 999999 + max_related_score = 0 + for p_idx, para_tokens in enumerate(doc['segmented_paragraphs']): + if len(sample['segmented_answers']) > 0: + related_score = metric_max_over_ground_truths( + recall, para_tokens, sample['segmented_answers']) + else: + continue + if related_score > max_related_score \ + or (related_score == max_related_score + and len(para_tokens) < most_related_para_len): + most_related_para = p_idx + most_related_para_len = len(para_tokens) + max_related_score = related_score + doc['most_related_para'] = most_related_para + + sample['answer_docs'] = [] + sample['answer_spans'] = [] + sample['fake_answers'] = [] + sample['match_scores'] = [] + + best_match_score = 0 + best_match_d_idx, best_match_span = -1, [-1, -1] + best_fake_answer = None + answer_tokens = set() + for segmented_answer in sample['segmented_answers']: + answer_tokens = answer_tokens | set( + [token for token in segmented_answer]) + for d_idx, doc in enumerate(sample['documents']): + if not doc['is_selected']: + continue + if doc['most_related_para'] == -1: + doc['most_related_para'] = 0 + most_related_para_tokens = doc['segmented_paragraphs'][doc[ + 'most_related_para']][:1000] + for start_tidx in range(len(most_related_para_tokens)): + if most_related_para_tokens[start_tidx] not in answer_tokens: + continue + for end_tidx in range( + len(most_related_para_tokens) - 1, start_tidx - 1, -1): + span_tokens = most_related_para_tokens[start_tidx:end_tidx + 1] + if len(sample['segmented_answers']) > 0: + match_score = metric_max_over_ground_truths( + f1_score, span_tokens, sample['segmented_answers']) + else: + match_score = 0 + if match_score == 0: + break + if match_score > best_match_score: + best_match_d_idx = d_idx + best_match_span = [start_tidx, end_tidx] + best_match_score = match_score + best_fake_answer = ''.join(span_tokens) + if best_match_score > 0: + sample['answer_docs'].append(best_match_d_idx) + sample['answer_spans'].append(best_match_span) + sample['fake_answers'].append(best_fake_answer) + sample['match_scores'].append(best_match_score) + + +if __name__ == '__main__': + for line in sys.stdin: + sample = json.loads(line) + find_fake_answer(sample) + print(json.dumps(sample, encoding='utf8', ensure_ascii=False)) diff --git a/fluid/machine_reading_comprehesion/utils/run_marco2dureader_preprocess.sh b/fluid/machine_reading_comprehesion/utils/run_marco2dureader_preprocess.sh new file mode 100644 index 
0000000000000000000000000000000000000000..fcb7d67a002ef15384b7c725eb14f3a4dd64ec9e
--- /dev/null
+++ b/fluid/machine_reading_comprehesion/utils/run_marco2dureader_preprocess.sh
@@ -0,0 +1,22 @@
+#!/bin/bash
+
+input_file=$1
+output_file=$2
+
+# convert the data from MARCO V2 (json) format to MARCO V1 (jsonl) format.
+# the script was forked from the MARCO repo.
+# the MARCO V1 format is much easier to explore.
+python3 marcov2_to_v1_tojsonl.py $input_file $input_file.marcov1
+
+# convert the data from MARCO V1 format to DuReader format.
+python3 marcov1_to_dureader.py $input_file.marcov1 >$input_file.dureader_raw
+
+# tokenize the data.
+python3 marco_tokenize_data.py $input_file.dureader_raw >$input_file.segmented
+
+# find fake answers (indicating the start and end positions of answers in the document) for train and dev sets.
+# note that this should not be applied to the test set, since there is no ground truth in the test set.
+python preprocess.py $input_file.segmented >$output_file
+
+# remove the temporary data files.
+rm -rf $input_file.dureader_raw $input_file.segmented
diff --git a/fluid/machine_reading_comprehesion/vocab.py b/fluid/machine_reading_comprehesion/vocab.py
new file mode 100644
index 0000000000000000000000000000000000000000..14b608052132cc5c6f46810778511bc9a6a6915b
--- /dev/null
+++ b/fluid/machine_reading_comprehesion/vocab.py
@@ -0,0 +1,199 @@
+# -*- coding:utf8 -*-
+# ==============================================================================
+# Copyright 2017 Baidu.com, Inc. All Rights Reserved
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+"""
+This module implements the Vocab class for converting string to id and back
+"""
+
+import numpy as np
+
+
+class Vocab(object):
+    """
+    Implements a vocabulary to store the tokens in the data, with their corresponding embeddings.
+ """ + + def __init__(self, filename=None, initial_tokens=None, lower=False): + self.id2token = {} + self.token2id = {} + self.token_cnt = {} + self.lower = lower + + self.embed_dim = None + self.embeddings = None + + self.pad_token = '' + self.unk_token = '' + + self.initial_tokens = initial_tokens if initial_tokens is not None else [] + self.initial_tokens.extend([self.pad_token, self.unk_token]) + for token in self.initial_tokens: + self.add(token) + + if filename is not None: + self.load_from_file(filename) + + def size(self): + """ + get the size of vocabulary + Returns: + an integer indicating the size + """ + return len(self.id2token) + + def load_from_file(self, file_path): + """ + loads the vocab from file_path + Args: + file_path: a file with a word in each line + """ + for line in open(file_path, 'r'): + token = line.rstrip('\n') + self.add(token) + + def get_id(self, token): + """ + gets the id of a token, returns the id of unk token if token is not in vocab + Args: + key: a string indicating the word + Returns: + an integer + """ + token = token.lower() if self.lower else token + try: + return self.token2id[token] + except KeyError: + return self.token2id[self.unk_token] + + def get_token(self, idx): + """ + gets the token corresponding to idx, returns unk token if idx is not in vocab + Args: + idx: an integer + returns: + a token string + """ + try: + return self.id2token[idx] + except KeyError: + return self.unk_token + + def add(self, token, cnt=1): + """ + adds the token to vocab + Args: + token: a string + cnt: a num indicating the count of the token to add, default is 1 + """ + token = token.lower() if self.lower else token + if token in self.token2id: + idx = self.token2id[token] + else: + idx = len(self.id2token) + self.id2token[idx] = token + self.token2id[token] = idx + if cnt > 0: + if token in self.token_cnt: + self.token_cnt[token] += cnt + else: + self.token_cnt[token] = cnt + return idx + + def filter_tokens_by_cnt(self, min_cnt): + """ + filter the tokens in vocab by their count + Args: + min_cnt: tokens with frequency less than min_cnt is filtered + """ + filtered_tokens = [ + token for token in self.token2id if self.token_cnt[token] >= min_cnt + ] + # rebuild the token x id map + self.token2id = {} + self.id2token = {} + for token in self.initial_tokens: + self.add(token, cnt=0) + for token in filtered_tokens: + self.add(token, cnt=0) + + def randomly_init_embeddings(self, embed_dim): + """ + randomly initializes the embeddings for each token + Args: + embed_dim: the size of the embedding for each token + """ + self.embed_dim = embed_dim + self.embeddings = np.random.rand(self.size(), embed_dim) + for token in [self.pad_token, self.unk_token]: + self.embeddings[self.get_id(token)] = np.zeros([self.embed_dim]) + + def load_pretrained_embeddings(self, embedding_path): + """ + loads the pretrained embeddings from embedding_path, + tokens not in pretrained embeddings will be filtered + Args: + embedding_path: the path of the pretrained embedding file + """ + trained_embeddings = {} + with open(embedding_path, 'r') as fin: + for line in fin: + contents = line.strip().split() + token = contents[0].decode('utf8') + if token not in self.token2id: + continue + trained_embeddings[token] = list(map(float, contents[1:])) + if self.embed_dim is None: + self.embed_dim = len(contents) - 1 + filtered_tokens = trained_embeddings.keys() + # rebuild the token x id map + self.token2id = {} + self.id2token = {} + for token in self.initial_tokens: + self.add(token, cnt=0) + for 
token in filtered_tokens: + self.add(token, cnt=0) + # load embeddings + self.embeddings = np.zeros([self.size(), self.embed_dim]) + for token in self.token2id.keys(): + if token in trained_embeddings: + self.embeddings[self.get_id(token)] = trained_embeddings[token] + + def convert_to_ids(self, tokens): + """ + Converts a list of tokens to ids, using the unk token if a token is not in vocab. + Args: + tokens: a list of tokens + Returns: + a list of ids + """ + vec = [self.get_id(label) for label in tokens] + return vec + + def recover_from_ids(self, ids, stop_id=None): + """ + Converts a list of ids to tokens, stops converting when stop_id is encountered + Args: + ids: a list of ids to convert + stop_id: the stop id, default is None + Returns: + a list of tokens + """ + tokens = [] + for i in ids: + tokens += [self.get_token(i)] + if stop_id is not None and i == stop_id: + break + return tokens diff --git a/fluid/metric_learning/losses/datareader.py b/fluid/metric_learning/losses/datareader.py index 97c401e5ae5c08eac4152359e71c6237fd129d28..c3f04eb96665fee339d8e70fa86d691a26c09031 100644 --- a/fluid/metric_learning/losses/datareader.py +++ b/fluid/metric_learning/losses/datareader.py @@ -4,7 +4,6 @@ import random import cPickle import functools import numpy as np -#import paddle.v2 as paddle import paddle from PIL import Image, ImageEnhance diff --git a/fluid/neural_machine_translation/rnn_search/_ce.py b/fluid/neural_machine_translation/rnn_search/_ce.py index e948336e82141c4a2072a02f73b51cb7b4396ca0..e00ac49273ba4bf489e9b837d65d448eaa2aea43 100644 --- a/fluid/neural_machine_translation/rnn_search/_ce.py +++ b/fluid/neural_machine_translation/rnn_search/_ce.py @@ -7,9 +7,9 @@ from kpi import CostKpi, DurationKpi, AccKpi #### NOTE kpi.py should be shared across models in some way!!!!
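As a quick illustration of the Vocab class introduced by the vocab.py file earlier in this patch, here is a minimal usage sketch; the toy corpus and the min_cnt/embed_dim values are made up for the example and are not taken from the repo:

```python
from vocab import Vocab

# toy corpus; real usage feeds tokenized MARCO/DuReader text
corpus = [['hello', 'world'], ['hello', 'paddle']]

vocab = Vocab(lower=True)
for tokens in corpus:
    for token in tokens:
        vocab.add(token)

# drop tokens seen fewer than 2 times, then attach random 300-d embeddings;
# randomly_init_embeddings itself zeroes the pad/unk rows
vocab.filter_tokens_by_cnt(min_cnt=2)
vocab.randomly_init_embeddings(embed_dim=300)

ids = vocab.convert_to_ids(['hello', 'unseen'])  # 'unseen' maps to the unk id
print(ids, vocab.recover_from_ids(ids))
```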
-train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=True) -test_cost_kpi = CostKpi('test_cost', 0.005, 0, actived=True) -train_duration_kpi = DurationKpi('train_duration', 0.06, 0, actived=True) +train_cost_kpi = CostKpi('train_cost', 0.02, 0, actived=False) +test_cost_kpi = CostKpi('test_cost', 0.005, 0, actived=False) +train_duration_kpi = DurationKpi('train_duration', 0.06, 0, actived=False) tracking_kpis = [ train_cost_kpi, diff --git a/fluid/neural_machine_translation/rnn_search/attention_model.py b/fluid/neural_machine_translation/rnn_search/attention_model.py index 3cf23a96efcdf1fe69fbf26905bcd8a113db6a7d..0c72697786819179dabce477a9c8d1be760dca28 100644 --- a/fluid/neural_machine_translation/rnn_search/attention_model.py +++ b/fluid/neural_machine_translation/rnn_search/attention_model.py @@ -122,9 +122,8 @@ def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim, decoder_state_expand = fluid.layers.sequence_expand( x=decoder_state_proj, y=encoder_proj) # concated lod should inherit from encoder_proj - concated = fluid.layers.concat( - input=[encoder_proj, decoder_state_expand], axis=1) - attention_weights = fluid.layers.fc(input=concated, + mixed_state = encoder_proj + decoder_state_expand + attention_weights = fluid.layers.fc(input=mixed_state, size=1, bias_attr=False) attention_weights = fluid.layers.sequence_softmax( diff --git a/fluid/neural_machine_translation/transformer/infer.py b/fluid/neural_machine_translation/transformer/infer.py index 666f9ccc4f9ef0242279c21eb78ec15bf2b11bbc..6fc04a9422c136d941559d1b45af8bd88c2d2460 100644 --- a/fluid/neural_machine_translation/transformer/infer.py +++ b/fluid/neural_machine_translation/transformer/infer.py @@ -161,7 +161,7 @@ def fast_infer(test_data, trg_idx2word): ]) # This is used here to set dropout to the test mode. 
- infer_program = fluid.default_main_program().inference_optimize() + infer_program = fluid.default_main_program().clone(for_test=True) for batch_id, data in enumerate(test_data.batch_generator()): data_input = prepare_batch_input( diff --git a/fluid/neural_machine_translation/transformer/model.py b/fluid/neural_machine_translation/transformer/model.py index 671df316d9da761e17d9725330bacba0c869b6e3..7f537dbc13c89c404dd8d34ad442c78a09876ee5 100644 --- a/fluid/neural_machine_translation/transformer/model.py +++ b/fluid/neural_machine_translation/transformer/model.py @@ -219,6 +219,7 @@ def prepare_encoder_decoder(src_word, size=[src_max_len, src_emb_dim], param_attr=fluid.ParamAttr( name=pos_enc_param_name, trainable=False)) + src_pos_enc.stop_gradient = True enc_input = src_word_emb + src_pos_enc return layers.dropout( enc_input, @@ -639,7 +640,8 @@ def wrap_decoder(trg_vocab_size, if weight_sharing: predict = layers.matmul( x=dec_output, - y=fluid.get_var(word_emb_param_names[0]), + y=fluid.default_main_program().global_block().var( + word_emb_param_names[0]), transpose_y=True) else: predict = layers.fc(input=dec_output, diff --git a/fluid/neural_machine_translation/transformer/reader.py b/fluid/neural_machine_translation/transformer/reader.py index c33e9d8f8a967de5283f95c0937c2296f4eefcbe..eb485793584c64f610ede11efda312100584bc9e 100644 --- a/fluid/neural_machine_translation/transformer/reader.py +++ b/fluid/neural_machine_translation/transformer/reader.py @@ -1,4 +1,5 @@ import glob +import six import os import tarfile @@ -262,8 +263,10 @@ class DataReader(object): if not os.path.isfile(fpath): raise IOError("Invalid file: %s" % fpath) - with open(fpath, "r") as f: + with open(fpath, "rb") as f: for line in f: + if six.PY3: + line = line.decode() fields = line.strip("\n").split(self._field_delimiter) if (not self._only_src and len(fields) == 2) or ( self._only_src and len(fields) == 1): @@ -272,8 +275,10 @@ @staticmethod def load_dict(dict_path, reverse=False): word_dict = {} - with open(dict_path, "rb") as fdict: + with open(dict_path, "rb") as fdict: for idx, line in enumerate(fdict): + if six.PY3: + line = line.decode() if reverse: word_dict[idx] = line.strip("\n") else: diff --git a/fluid/neural_machine_translation/transformer/train.py b/fluid/neural_machine_translation/transformer/train.py index a800c1912ad2e349fe63b546dcddaa181974cf18..f3432ccbd4c5dc8e9d9d669886945ed37303224a 100644 --- a/fluid/neural_machine_translation/transformer/train.py +++ b/fluid/neural_machine_translation/transformer/train.py @@ -427,7 +427,7 @@ def train_loop(exe, train_prog, startup_prog, dev_count, sum_cost, avg_cost, # Since the token number differs among devices, customize the gradient scale to # use the token-averaged cost across devices; the gradient scale is # `1 / token_number` for the average cost.
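The reader.py change above is the usual Python 2/3 compatibility idiom: open the file in binary mode so both interpreters take the same code path, then decode each line only under Python 3. A standalone sketch of the same pattern (the helper name is ours, not the repo's):

```python
import six

def iter_lines(fpath):
    # binary mode behaves identically under Python 2 and 3;
    # decode() turns bytes back into str when running on Python 3
    with open(fpath, "rb") as f:
        for line in f:
            if six.PY3:
                line = line.decode()
            yield line.rstrip("\n")
```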
- build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized + # build_strategy.gradient_scale_strategy = fluid.BuildStrategy.GradientScaleStrategy.Customized train_exe = fluid.ParallelExecutor( use_cuda=TrainTaskConfig.use_gpu, loss_name=avg_cost.name, diff --git a/fluid/object_detection/.gitignore b/fluid/object_detection/.gitignore index 4e8c219d43e37b545f8c433ea50000cc7c70a656..a4552fd2acc864059f0cee0d88f96c0b5bd73aa0 100644 --- a/fluid/object_detection/.gitignore +++ b/fluid/object_detection/.gitignore @@ -20,3 +20,4 @@ data/pascalvoc/trainval.txt log* *.log +ssd_mobilenet_v1_pascalvoc* diff --git a/fluid/object_detection/train.py b/fluid/object_detection/train.py index 6e763ea1d4ae1a2579238aa4388bc6425b1400f7..2aa0779c202330add779c5b41bb094f961a03952 100644 --- a/fluid/object_detection/train.py +++ b/fluid/object_detection/train.py @@ -38,7 +38,8 @@ train_parameters = { "batch_size": 64, "lr": 0.001, "lr_epochs": [40, 60, 80, 100], - "lr_decay": [1, 0.5, 0.25, 0.1, 0.01] + "lr_decay": [1, 0.5, 0.25, 0.1, 0.01], + "ap_version": '11point', }, "coco2014": { "train_images": 82783, @@ -47,7 +48,8 @@ train_parameters = { "batch_size": 64, "lr": 0.001, "lr_epochs": [12, 19], - "lr_decay": [1, 0.5, 0.25] + "lr_decay": [1, 0.5, 0.25], + "ap_version": 'integral', # should use eval_coco_map.py to test the model }, "coco2017": { "train_images": 118287, @@ -56,7 +58,8 @@ train_parameters = { "batch_size": 64, "lr": 0.001, "lr_epochs": [12, 19], - "lr_decay": [1, 0.5, 0.25] + "lr_decay": [1, 0.5, 0.25], + "ap_version": 'integral', # should use eval_coco_map.py to test the model } } @@ -77,6 +80,7 @@ def optimizer_setting(train_params): def build_program(main_prog, startup_prog, train_params, is_train): image_shape = train_params['image_shape'] class_num = train_params['class_num'] + ap_version = train_params['ap_version'] with fluid.program_guard(main_prog, startup_prog): py_reader = fluid.layers.py_reader( capacity=64, @@ -97,16 +101,15 @@ def build_program(main_prog, startup_prog, train_params, is_train): nmsed_out = fluid.layers.detection_output( locs, confs, box, box_var, nms_threshold=0.45) - with fluid.program_guard(main_prog): - loss = fluid.evaluator.DetectionMAP( - nmsed_out, - gt_label, - gt_box, - difficult, - class_num, - overlap_threshold=0.5, - evaluate_difficult=False, - ap_version=args.ap_version) + loss = fluid.evaluator.DetectionMAP( + nmsed_out, + gt_label, + gt_box, + difficult, + class_num, + overlap_threshold=0.5, + evaluate_difficult=False, + ap_version=ap_version) return py_reader, loss @@ -230,7 +233,7 @@ def train(args, loss_v = np.mean(np.array(loss_v)) every_epoc_loss.append(loss_v) if batch_id % 20 == 0: - print("Epoc {0}, batch {1}, loss {2}, time {3}".format( + print("Epoch {:d}, batch {:d}, loss {:.6f}, time {:.5f}".format( epoc_id, batch_id, loss_v, start_time - prev_start_time)) end_time = time.time() total_time += end_time - start_time diff --git a/fluid/ocr_recognition/attention_model.py b/fluid/ocr_recognition/attention_model.py index 363c03070e98c721a63891ca8c7f35ce7046ac6d..d3d2185bb3685e4dd048e9bbf2990f90bf1c2254 100755 --- a/fluid/ocr_recognition/attention_model.py +++ b/fluid/ocr_recognition/attention_model.py @@ -2,6 +2,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function import paddle.fluid as fluid +import six decoder_size = 128 word_vector_dim = 128 @@ -22,7 +23,7 @@ def conv_bn_pool(input, pool=True, use_cudnn=True): tmp = input - for i in xrange(group): + for i in
six.moves.xrange(group): filter_size = 3 conv_std = (2.0 / (filter_size**2 * tmp.shape[1]))**0.5 conv_param = fluid.ParamAttr( diff --git a/fluid/ocr_recognition/eval.py b/fluid/ocr_recognition/eval.py index 1d553999eb545e3a1134658e78592fb74a4a8c3c..a19ca4a34f9b89809dce3d87cf10d5348850f4e2 100644 --- a/fluid/ocr_recognition/eval.py +++ b/fluid/ocr_recognition/eval.py @@ -1,4 +1,3 @@ -import paddle.v2 as paddle import paddle.fluid as fluid from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_data from attention_model import attention_eval diff --git a/fluid/ocr_recognition/infer.py b/fluid/ocr_recognition/infer.py index 6a28314dc3abf551ef016a4f61e973643d000b2b..5c3e1f240c45af2421e4fd15165d590a81db02ae 100755 --- a/fluid/ocr_recognition/infer.py +++ b/fluid/ocr_recognition/infer.py @@ -1,5 +1,4 @@ from __future__ import print_function -import paddle.v2 as paddle import paddle.fluid as fluid from utility import add_arguments, print_arguments, to_lodtensor, get_ctc_feeder_data, get_attention_feeder_for_infer import paddle.fluid.profiler as profiler diff --git a/fluid/policy_gradient/brain.py b/fluid/policy_gradient/brain.py index 314aec52ea56a3887e14e69786107d7b98319ab2..27a2da28563e5063213100d34c1b88d5fe2f91b0 100644 --- a/fluid/policy_gradient/brain.py +++ b/fluid/policy_gradient/brain.py @@ -1,5 +1,4 @@ import numpy as np -import paddle.v2 as paddle import paddle.fluid as fluid # reproducible np.random.seed(1) diff --git a/fluid/video_classification/eval.py b/fluid/video_classification/eval.py index 91b8d445978f78b9b0883ac2717831904eca09bb..130e682c1b03e8203dd0d36124496bb82c81564c 100644 --- a/fluid/video_classification/eval.py +++ b/fluid/video_classification/eval.py @@ -2,7 +2,7 @@ import os import numpy as np import time import sys -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid from resnet import TSN_ResNet import reader diff --git a/fluid/video_classification/infer.py b/fluid/video_classification/infer.py index 1f3048b1a89f1f218248d6e5760d08683000343d..15cc2b53d918f70acf43da7eb1e095c1c03e0c4e 100644 --- a/fluid/video_classification/infer.py +++ b/fluid/video_classification/infer.py @@ -2,7 +2,7 @@ import os import numpy as np import time import sys -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid from resnet import TSN_ResNet import reader diff --git a/fluid/video_classification/reader.py b/fluid/video_classification/reader.py index e688b66487f2615e19d468fa36f4cb2dc7578a54..11cfaa5b3ddc949d20f7f33d15f957cba5225919 100644 --- a/fluid/video_classification/reader.py +++ b/fluid/video_classification/reader.py @@ -5,7 +5,7 @@ import functools import cPickle from cStringIO import StringIO import numpy as np -import paddle.v2 as paddle +import paddle from PIL import Image, ImageEnhance random.seed(0) diff --git a/fluid/video_classification/train.py b/fluid/video_classification/train.py index a4171b2faf5fb63c37607fbe2fea21416c3e0441..c879bf688233dce5d1ce839af76ca41164e3a571 100644 --- a/fluid/video_classification/train.py +++ b/fluid/video_classification/train.py @@ -2,7 +2,7 @@ import os import numpy as np import time import sys -import paddle.v2 as paddle +import paddle import paddle.fluid as fluid from resnet import TSN_ResNet import reader diff --git a/v2/README.cn.md b/legacy/README.cn.md similarity index 100% rename from v2/README.cn.md rename to legacy/README.cn.md diff --git a/v2/README.md b/legacy/README.md similarity index 88% rename from v2/README.md rename to legacy/README.md index 
643cb288d15ecffc52fd3a920b5ee51b390695d2..f7741c9b7b1c39e569e74606d054847b27a206d8 100644 --- a/v2/README.md +++ b/legacy/README.md @@ -12,23 +12,23 @@ The word embedding expresses words with a real vector. Each dimension of the vector In the example of word vectors, we show how to use Hierarchical-Sigmoid and Noise Contrastive Estimation (NCE) to accelerate word-vector learning. -- 1.1 [Hsigmoid Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/v2/hsigmoid) -- 1.2 [Noise Contrastive Estimation Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/v2/nce_cost) +- 1.1 [Hsigmoid Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/legacy/hsigmoid) +- 1.2 [Noise Contrastive Estimation Accelerated Word Vector Training](https://github.com/PaddlePaddle/models/tree/develop/legacy/nce_cost) ## 2. RNN language model The language model is important in the field of natural language processing. In addition to producing word vectors (a by-product of language model training), it can also help us generate text. Given a number of words, the language model can predict the next most likely word. In the example of using a language model to generate text, we focus on the recurrent neural network language model. Following the instructions in the document, we can quickly adapt it to our own training corpus and build interesting applications such as automatic poetry or prose writing. -- 2.1 [Generate text using the RNN language model](https://github.com/PaddlePaddle/models/tree/develop/v2/generate_sequence_by_rnn_lm) +- 2.1 [Generate text using the RNN language model](https://github.com/PaddlePaddle/models/tree/develop/legacy/generate_sequence_by_rnn_lm) ## 3. Click-Through Rate prediction The click-through rate model predicts the probability that a user will click on an ad. It is widely used in advertising technology. In the early stages of click-through rate prediction, logistic regression performed well on large-scale sparse features. In recent years, DNN models, with their stronger learning capacity, have gradually taken over the task. In the click-through rate example, we first present Google's Wide & Deep model, which combines the generalization ability of a DNN with a logistic regression component suited to large-scale sparse features. Then we provide the deep factorization machine for click-through rate prediction. The deep factorization machine combines the factorization machine and deep neural networks to model both low order and high order interactions of input features (the underlying factorization machine term is written out after this README excerpt). -- 3.1 [Click-Through Rate Model](https://github.com/PaddlePaddle/models/tree/develop/v2/ctr) -- 3.2 [Deep Factorization Machine for Click-Through Rate prediction](https://github.com/PaddlePaddle/models/tree/develop/v2/deep_fm) +- 3.1 [Click-Through Rate Model](https://github.com/PaddlePaddle/models/tree/develop/legacy/ctr) +- 3.2 [Deep Factorization Machine for Click-Through Rate prediction](https://github.com/PaddlePaddle/models/tree/develop/legacy/deep_fm) ## 4. Text classification @@ -36,7 +36,7 @@ Text classification is one of the most basic tasks in natural language processing For text classification, we provide a non-sequential text classification model based on DNN and CNN. (For an LSTM-based model, please refer to PaddleBook [Sentiment Analysis](http://www.paddlepaddle.org/docs/develop/book/06.understand_sentiment/index.html)).
-- 4.1 [Sentiment analysis based on DNN / CNN](https://github.com/PaddlePaddle/models/tree/develop/v2/text_classification) +- 4.1 [Sentiment analysis based on DNN / CNN](https://github.com/PaddlePaddle/models/tree/develop/legacy/text_classification) ## 5. Learning to rank @@ -45,14 +45,14 @@ A deep neural network can be used to model the scoring function, forming various ranking models. The algorithms for learning to rank are usually categorized into three groups by their input representation and their loss function: pointwise, pairwise and listwise approaches. Here we demonstrate the RankLoss method (a pairwise approach) and the LambdaRank method (a listwise approach). (For pointwise approaches, please refer to the [Recommender System](http://www.paddlepaddle.org/docs/develop/book/05.recommender_system/index.html) chapter). -- 5.1 [Learning to rank based on Pairwise and Listwise approches](https://github.com/PaddlePaddle/models/tree/develop/v2/ltr) +- 5.1 [Learning to rank based on Pairwise and Listwise approaches](https://github.com/PaddlePaddle/models/tree/develop/legacy/ltr) ## 6. Semantic model The deep structured semantic model uses a DNN to learn low-dimensional vector representations in a continuous semantic space, and then models the semantic similarity between two sentences. In this example, we demonstrate how to use PaddlePaddle to implement a generic deep structured semantic model to model the semantic similarity between two strings. The model supports different network structures such as CNN (Convolutional Network), FC (Fully Connected Network), RNN (Recurrent Neural Network), and different loss functions such as classification, regression, and ranking. -- 6.1 [Deep structured semantic model](https://github.com/PaddlePaddle/models/tree/develop/v2/dssm) +- 6.1 [Deep structured semantic model](https://github.com/PaddlePaddle/models/tree/develop/legacy/dssm) ## 7. Sequence tagging @@ -60,7 +60,7 @@ Given the input sequence, the sequence tagging model is one of the most basic tasks In the sequence tagging example, we describe how to train an end-to-end sequence tagging model, with the Named Entity Recognition (NER) task as an example. -- 7.1 [Name Entity Recognition](https://github.com/PaddlePaddle/models/tree/develop/v2/sequence_tagging_for_ner) +- 7.1 [Named Entity Recognition](https://github.com/PaddlePaddle/models/tree/develop/legacy/sequence_tagging_for_ner) ## 8. Sequence to sequence learning @@ -68,19 +68,19 @@ The sequence-to-sequence model has a wide range of applications. This includes machine translation As an example of sequence-to-sequence learning, we take the machine translation task. We demonstrate the sequence-to-sequence mapping model without an attention mechanism, which is the basis for all sequence-to-sequence learning models. We use scheduled sampling to mitigate error accumulation in the RNN model, and also demonstrate machine translation with an external memory mechanism. -- 8.1 [Basic Sequence-to-sequence model](https://github.com/PaddlePaddle/models/tree/develop/v2/nmt_without_attention) +- 8.1 [Basic Sequence-to-sequence model](https://github.com/PaddlePaddle/models/tree/develop/legacy/nmt_without_attention) ## 9. Image classification In the image classification example, we show how to train AlexNet, VGG, GoogLeNet, ResNet, Inception-v4, Inception-Resnet-V2 and Xception models in PaddlePaddle. We also provide model conversion tools that convert Caffe- or TensorFlow-trained model files into PaddlePaddle model files.
-- 9.1 [convert Caffe model file to PaddlePaddle model file](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification/caffe2paddle) -- 9.2 [convert TensorFlow model file to PaddlePaddle model file](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification/tf2paddle) -- 9.3 [AlexNet](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification) -- 9.4 [VGG](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification) -- 9.5 [Residual Network](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification) -- 9.6 [Inception-v4](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification) -- 9.7 [Inception-Resnet-V2](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification) -- 9.8 [Xception](https://github.com/PaddlePaddle/models/tree/develop/v2/image_classification) +- 9.1 [convert Caffe model file to PaddlePaddle model file](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification/caffe2paddle) +- 9.2 [convert TensorFlow model file to PaddlePaddle model file](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification/tf2paddle) +- 9.3 [AlexNet](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification) +- 9.4 [VGG](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification) +- 9.5 [Residual Network](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification) +- 9.6 [Inception-v4](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification) +- 9.7 [Inception-Resnet-V2](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification) +- 9.8 [Xception](https://github.com/PaddlePaddle/models/tree/develop/legacy/image_classification) This tutorial is contributed by [PaddlePaddle](https://github.com/PaddlePaddle/Paddle) and licensed under the [Apache-2.0 license](LICENSE). 
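For the deep factorization machine mentioned in section 3 of this README, the factorization machine component it builds on scores an input with a linear term plus pairwise interactions factored through latent vectors; in standard FM notation (not taken from the repo's code) this is

$$\hat{y}(\mathbf{x}) = w_0 + \sum_{i=1}^{n} w_i x_i + \sum_{i=1}^{n}\sum_{j=i+1}^{n} \langle \mathbf{v}_i, \mathbf{v}_j \rangle\, x_i x_j$$

DeepFM shares the latent vectors $\mathbf{v}_i$ between this FM component and a DNN component, so low-order and high-order feature interactions are learned jointly from the same embeddings.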
diff --git a/v2/conv_seq2seq/README.md b/legacy/conv_seq2seq/README.md similarity index 100% rename from v2/conv_seq2seq/README.md rename to legacy/conv_seq2seq/README.md diff --git a/v2/conv_seq2seq/beamsearch.py b/legacy/conv_seq2seq/beamsearch.py similarity index 100% rename from v2/conv_seq2seq/beamsearch.py rename to legacy/conv_seq2seq/beamsearch.py diff --git a/v2/conv_seq2seq/download.sh b/legacy/conv_seq2seq/download.sh similarity index 100% rename from v2/conv_seq2seq/download.sh rename to legacy/conv_seq2seq/download.sh diff --git a/v2/conv_seq2seq/infer.py b/legacy/conv_seq2seq/infer.py similarity index 100% rename from v2/conv_seq2seq/infer.py rename to legacy/conv_seq2seq/infer.py diff --git a/v2/conv_seq2seq/model.py b/legacy/conv_seq2seq/model.py similarity index 100% rename from v2/conv_seq2seq/model.py rename to legacy/conv_seq2seq/model.py diff --git a/v2/conv_seq2seq/preprocess.py b/legacy/conv_seq2seq/preprocess.py similarity index 100% rename from v2/conv_seq2seq/preprocess.py rename to legacy/conv_seq2seq/preprocess.py diff --git a/v2/conv_seq2seq/reader.py b/legacy/conv_seq2seq/reader.py similarity index 100% rename from v2/conv_seq2seq/reader.py rename to legacy/conv_seq2seq/reader.py diff --git a/v2/conv_seq2seq/train.py b/legacy/conv_seq2seq/train.py similarity index 100% rename from v2/conv_seq2seq/train.py rename to legacy/conv_seq2seq/train.py diff --git a/v2/ctr/README.cn.md b/legacy/ctr/README.cn.md similarity index 100% rename from v2/ctr/README.cn.md rename to legacy/ctr/README.cn.md diff --git a/v2/ctr/README.md b/legacy/ctr/README.md similarity index 100% rename from v2/ctr/README.md rename to legacy/ctr/README.md diff --git a/v2/ctr/avazu_data_processer.py b/legacy/ctr/avazu_data_processer.py similarity index 100% rename from v2/ctr/avazu_data_processer.py rename to legacy/ctr/avazu_data_processer.py diff --git a/v2/ctr/dataset.md b/legacy/ctr/dataset.md similarity index 100% rename from v2/ctr/dataset.md rename to legacy/ctr/dataset.md diff --git a/v2/ctr/images/lr_vs_dnn.jpg b/legacy/ctr/images/lr_vs_dnn.jpg similarity index 100% rename from v2/ctr/images/lr_vs_dnn.jpg rename to legacy/ctr/images/lr_vs_dnn.jpg diff --git a/v2/ctr/images/wide_deep.png b/legacy/ctr/images/wide_deep.png similarity index 100% rename from v2/ctr/images/wide_deep.png rename to legacy/ctr/images/wide_deep.png diff --git a/v2/ctr/infer.py b/legacy/ctr/infer.py similarity index 100% rename from v2/ctr/infer.py rename to legacy/ctr/infer.py diff --git a/v2/ctr/network_conf.py b/legacy/ctr/network_conf.py similarity index 100% rename from v2/ctr/network_conf.py rename to legacy/ctr/network_conf.py diff --git a/v2/ctr/reader.py b/legacy/ctr/reader.py similarity index 100% rename from v2/ctr/reader.py rename to legacy/ctr/reader.py diff --git a/v2/ctr/train.py b/legacy/ctr/train.py similarity index 100% rename from v2/ctr/train.py rename to legacy/ctr/train.py diff --git a/v2/ctr/utils.py b/legacy/ctr/utils.py similarity index 100% rename from v2/ctr/utils.py rename to legacy/ctr/utils.py diff --git a/v2/deep_fm/README.cn.md b/legacy/deep_fm/README.cn.md similarity index 100% rename from v2/deep_fm/README.cn.md rename to legacy/deep_fm/README.cn.md diff --git a/v2/deep_fm/README.md b/legacy/deep_fm/README.md similarity index 100% rename from v2/deep_fm/README.md rename to legacy/deep_fm/README.md diff --git a/v2/deep_fm/data/download.sh b/legacy/deep_fm/data/download.sh similarity index 100% rename from v2/deep_fm/data/download.sh rename to legacy/deep_fm/data/download.sh diff 
--git a/v2/deep_fm/infer.py b/legacy/deep_fm/infer.py similarity index 100% rename from v2/deep_fm/infer.py rename to legacy/deep_fm/infer.py diff --git a/v2/deep_fm/network_conf.py b/legacy/deep_fm/network_conf.py similarity index 100% rename from v2/deep_fm/network_conf.py rename to legacy/deep_fm/network_conf.py diff --git a/v2/deep_fm/preprocess.py b/legacy/deep_fm/preprocess.py similarity index 100% rename from v2/deep_fm/preprocess.py rename to legacy/deep_fm/preprocess.py diff --git a/v2/deep_fm/reader.py b/legacy/deep_fm/reader.py similarity index 100% rename from v2/deep_fm/reader.py rename to legacy/deep_fm/reader.py diff --git a/v2/deep_fm/train.py b/legacy/deep_fm/train.py similarity index 100% rename from v2/deep_fm/train.py rename to legacy/deep_fm/train.py diff --git a/v2/dssm/README.cn.md b/legacy/dssm/README.cn.md similarity index 100% rename from v2/dssm/README.cn.md rename to legacy/dssm/README.cn.md diff --git a/v2/dssm/README.md b/legacy/dssm/README.md similarity index 100% rename from v2/dssm/README.md rename to legacy/dssm/README.md diff --git a/v2/dssm/data/classification/test.txt b/legacy/dssm/data/classification/test.txt similarity index 100% rename from v2/dssm/data/classification/test.txt rename to legacy/dssm/data/classification/test.txt diff --git a/v2/dssm/data/classification/train.txt b/legacy/dssm/data/classification/train.txt similarity index 100% rename from v2/dssm/data/classification/train.txt rename to legacy/dssm/data/classification/train.txt diff --git a/v2/dssm/data/rank/test.txt b/legacy/dssm/data/rank/test.txt similarity index 100% rename from v2/dssm/data/rank/test.txt rename to legacy/dssm/data/rank/test.txt diff --git a/v2/dssm/data/rank/train.txt b/legacy/dssm/data/rank/train.txt similarity index 100% rename from v2/dssm/data/rank/train.txt rename to legacy/dssm/data/rank/train.txt diff --git a/v2/dssm/data/vocab.txt b/legacy/dssm/data/vocab.txt similarity index 100% rename from v2/dssm/data/vocab.txt rename to legacy/dssm/data/vocab.txt diff --git a/v2/dssm/images/dssm.jpg b/legacy/dssm/images/dssm.jpg similarity index 100% rename from v2/dssm/images/dssm.jpg rename to legacy/dssm/images/dssm.jpg diff --git a/v2/dssm/images/dssm.png b/legacy/dssm/images/dssm.png similarity index 100% rename from v2/dssm/images/dssm.png rename to legacy/dssm/images/dssm.png diff --git a/v2/dssm/images/dssm2.jpg b/legacy/dssm/images/dssm2.jpg similarity index 100% rename from v2/dssm/images/dssm2.jpg rename to legacy/dssm/images/dssm2.jpg diff --git a/v2/dssm/images/dssm2.png b/legacy/dssm/images/dssm2.png similarity index 100% rename from v2/dssm/images/dssm2.png rename to legacy/dssm/images/dssm2.png diff --git a/v2/dssm/images/dssm3.jpg b/legacy/dssm/images/dssm3.jpg similarity index 100% rename from v2/dssm/images/dssm3.jpg rename to legacy/dssm/images/dssm3.jpg diff --git a/v2/dssm/infer.py b/legacy/dssm/infer.py similarity index 100% rename from v2/dssm/infer.py rename to legacy/dssm/infer.py diff --git a/v2/dssm/network_conf.py b/legacy/dssm/network_conf.py similarity index 100% rename from v2/dssm/network_conf.py rename to legacy/dssm/network_conf.py diff --git a/v2/dssm/reader.py b/legacy/dssm/reader.py similarity index 100% rename from v2/dssm/reader.py rename to legacy/dssm/reader.py diff --git a/v2/dssm/train.py b/legacy/dssm/train.py similarity index 100% rename from v2/dssm/train.py rename to legacy/dssm/train.py diff --git a/v2/dssm/utils.py b/legacy/dssm/utils.py similarity index 100% rename from v2/dssm/utils.py rename to legacy/dssm/utils.py 
diff --git a/v2/generate_chinese_poetry/README.md b/legacy/generate_chinese_poetry/README.md similarity index 100% rename from v2/generate_chinese_poetry/README.md rename to legacy/generate_chinese_poetry/README.md diff --git a/v2/generate_chinese_poetry/README_en.md b/legacy/generate_chinese_poetry/README_en.md similarity index 100% rename from v2/generate_chinese_poetry/README_en.md rename to legacy/generate_chinese_poetry/README_en.md diff --git a/v2/generate_chinese_poetry/data/download.sh b/legacy/generate_chinese_poetry/data/download.sh similarity index 100% rename from v2/generate_chinese_poetry/data/download.sh rename to legacy/generate_chinese_poetry/data/download.sh diff --git a/v2/generate_chinese_poetry/generate.py b/legacy/generate_chinese_poetry/generate.py similarity index 100% rename from v2/generate_chinese_poetry/generate.py rename to legacy/generate_chinese_poetry/generate.py diff --git a/v2/generate_chinese_poetry/network_conf.py b/legacy/generate_chinese_poetry/network_conf.py similarity index 100% rename from v2/generate_chinese_poetry/network_conf.py rename to legacy/generate_chinese_poetry/network_conf.py diff --git a/v2/generate_chinese_poetry/preprocess.py b/legacy/generate_chinese_poetry/preprocess.py similarity index 100% rename from v2/generate_chinese_poetry/preprocess.py rename to legacy/generate_chinese_poetry/preprocess.py diff --git a/v2/generate_chinese_poetry/reader.py b/legacy/generate_chinese_poetry/reader.py similarity index 100% rename from v2/generate_chinese_poetry/reader.py rename to legacy/generate_chinese_poetry/reader.py diff --git a/v2/generate_chinese_poetry/train.py b/legacy/generate_chinese_poetry/train.py similarity index 100% rename from v2/generate_chinese_poetry/train.py rename to legacy/generate_chinese_poetry/train.py diff --git a/v2/generate_chinese_poetry/utils.py b/legacy/generate_chinese_poetry/utils.py similarity index 100% rename from v2/generate_chinese_poetry/utils.py rename to legacy/generate_chinese_poetry/utils.py diff --git a/v2/generate_sequence_by_rnn_lm/.gitignore b/legacy/generate_sequence_by_rnn_lm/.gitignore similarity index 100% rename from v2/generate_sequence_by_rnn_lm/.gitignore rename to legacy/generate_sequence_by_rnn_lm/.gitignore diff --git a/v2/generate_sequence_by_rnn_lm/README.md b/legacy/generate_sequence_by_rnn_lm/README.md similarity index 100% rename from v2/generate_sequence_by_rnn_lm/README.md rename to legacy/generate_sequence_by_rnn_lm/README.md diff --git a/v2/generate_sequence_by_rnn_lm/beam_search.py b/legacy/generate_sequence_by_rnn_lm/beam_search.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/beam_search.py rename to legacy/generate_sequence_by_rnn_lm/beam_search.py diff --git a/v2/generate_sequence_by_rnn_lm/config.py b/legacy/generate_sequence_by_rnn_lm/config.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/config.py rename to legacy/generate_sequence_by_rnn_lm/config.py diff --git a/v2/generate_sequence_by_rnn_lm/data/train_data_examples.txt b/legacy/generate_sequence_by_rnn_lm/data/train_data_examples.txt similarity index 100% rename from v2/generate_sequence_by_rnn_lm/data/train_data_examples.txt rename to legacy/generate_sequence_by_rnn_lm/data/train_data_examples.txt diff --git a/v2/generate_sequence_by_rnn_lm/generate.py b/legacy/generate_sequence_by_rnn_lm/generate.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/generate.py rename to legacy/generate_sequence_by_rnn_lm/generate.py diff --git 
a/v2/generate_sequence_by_rnn_lm/images/ngram.png b/legacy/generate_sequence_by_rnn_lm/images/ngram.png similarity index 100% rename from v2/generate_sequence_by_rnn_lm/images/ngram.png rename to legacy/generate_sequence_by_rnn_lm/images/ngram.png diff --git a/v2/generate_sequence_by_rnn_lm/images/rnn.png b/legacy/generate_sequence_by_rnn_lm/images/rnn.png similarity index 100% rename from v2/generate_sequence_by_rnn_lm/images/rnn.png rename to legacy/generate_sequence_by_rnn_lm/images/rnn.png diff --git a/v2/generate_sequence_by_rnn_lm/network_conf.py b/legacy/generate_sequence_by_rnn_lm/network_conf.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/network_conf.py rename to legacy/generate_sequence_by_rnn_lm/network_conf.py diff --git a/v2/generate_sequence_by_rnn_lm/reader.py b/legacy/generate_sequence_by_rnn_lm/reader.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/reader.py rename to legacy/generate_sequence_by_rnn_lm/reader.py diff --git a/v2/generate_sequence_by_rnn_lm/train.py b/legacy/generate_sequence_by_rnn_lm/train.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/train.py rename to legacy/generate_sequence_by_rnn_lm/train.py diff --git a/v2/generate_sequence_by_rnn_lm/utils.py b/legacy/generate_sequence_by_rnn_lm/utils.py similarity index 100% rename from v2/generate_sequence_by_rnn_lm/utils.py rename to legacy/generate_sequence_by_rnn_lm/utils.py diff --git a/v2/globally_normalized_reader/.gitignore b/legacy/globally_normalized_reader/.gitignore similarity index 100% rename from v2/globally_normalized_reader/.gitignore rename to legacy/globally_normalized_reader/.gitignore diff --git a/v2/globally_normalized_reader/README.cn.md b/legacy/globally_normalized_reader/README.cn.md similarity index 100% rename from v2/globally_normalized_reader/README.cn.md rename to legacy/globally_normalized_reader/README.cn.md diff --git a/v2/globally_normalized_reader/README.md b/legacy/globally_normalized_reader/README.md similarity index 100% rename from v2/globally_normalized_reader/README.md rename to legacy/globally_normalized_reader/README.md diff --git a/v2/globally_normalized_reader/basic_modules.py b/legacy/globally_normalized_reader/basic_modules.py similarity index 100% rename from v2/globally_normalized_reader/basic_modules.py rename to legacy/globally_normalized_reader/basic_modules.py diff --git a/v2/globally_normalized_reader/beam_decoding.py b/legacy/globally_normalized_reader/beam_decoding.py similarity index 100% rename from v2/globally_normalized_reader/beam_decoding.py rename to legacy/globally_normalized_reader/beam_decoding.py diff --git a/v2/globally_normalized_reader/config.py b/legacy/globally_normalized_reader/config.py similarity index 100% rename from v2/globally_normalized_reader/config.py rename to legacy/globally_normalized_reader/config.py diff --git a/v2/globally_normalized_reader/data/download.sh b/legacy/globally_normalized_reader/data/download.sh similarity index 100% rename from v2/globally_normalized_reader/data/download.sh rename to legacy/globally_normalized_reader/data/download.sh diff --git a/v2/globally_normalized_reader/evaluate.py b/legacy/globally_normalized_reader/evaluate.py similarity index 100% rename from v2/globally_normalized_reader/evaluate.py rename to legacy/globally_normalized_reader/evaluate.py diff --git a/v2/globally_normalized_reader/featurize.py b/legacy/globally_normalized_reader/featurize.py similarity index 100% rename from v2/globally_normalized_reader/featurize.py rename to 
legacy/globally_normalized_reader/featurize.py diff --git a/v2/globally_normalized_reader/infer.py b/legacy/globally_normalized_reader/infer.py similarity index 100% rename from v2/globally_normalized_reader/infer.py rename to legacy/globally_normalized_reader/infer.py diff --git a/v2/globally_normalized_reader/model.py b/legacy/globally_normalized_reader/model.py similarity index 100% rename from v2/globally_normalized_reader/model.py rename to legacy/globally_normalized_reader/model.py diff --git a/v2/globally_normalized_reader/reader.py b/legacy/globally_normalized_reader/reader.py similarity index 100% rename from v2/globally_normalized_reader/reader.py rename to legacy/globally_normalized_reader/reader.py diff --git a/v2/globally_normalized_reader/train.py b/legacy/globally_normalized_reader/train.py similarity index 100% rename from v2/globally_normalized_reader/train.py rename to legacy/globally_normalized_reader/train.py diff --git a/v2/globally_normalized_reader/vocab.py b/legacy/globally_normalized_reader/vocab.py similarity index 100% rename from v2/globally_normalized_reader/vocab.py rename to legacy/globally_normalized_reader/vocab.py diff --git a/v2/hsigmoid/.gitignore b/legacy/hsigmoid/.gitignore similarity index 100% rename from v2/hsigmoid/.gitignore rename to legacy/hsigmoid/.gitignore diff --git a/v2/hsigmoid/README.md b/legacy/hsigmoid/README.md similarity index 100% rename from v2/hsigmoid/README.md rename to legacy/hsigmoid/README.md diff --git a/v2/hsigmoid/images/binary_tree.png b/legacy/hsigmoid/images/binary_tree.png similarity index 100% rename from v2/hsigmoid/images/binary_tree.png rename to legacy/hsigmoid/images/binary_tree.png diff --git a/v2/hsigmoid/images/network_conf.png b/legacy/hsigmoid/images/network_conf.png similarity index 100% rename from v2/hsigmoid/images/network_conf.png rename to legacy/hsigmoid/images/network_conf.png diff --git a/v2/hsigmoid/images/path_to_1.png b/legacy/hsigmoid/images/path_to_1.png similarity index 100% rename from v2/hsigmoid/images/path_to_1.png rename to legacy/hsigmoid/images/path_to_1.png diff --git a/v2/hsigmoid/infer.py b/legacy/hsigmoid/infer.py similarity index 100% rename from v2/hsigmoid/infer.py rename to legacy/hsigmoid/infer.py diff --git a/v2/hsigmoid/network_conf.py b/legacy/hsigmoid/network_conf.py similarity index 100% rename from v2/hsigmoid/network_conf.py rename to legacy/hsigmoid/network_conf.py diff --git a/v2/hsigmoid/train.py b/legacy/hsigmoid/train.py similarity index 100% rename from v2/hsigmoid/train.py rename to legacy/hsigmoid/train.py diff --git a/v2/image_classification/README.md b/legacy/image_classification/README.md similarity index 100% rename from v2/image_classification/README.md rename to legacy/image_classification/README.md diff --git a/v2/image_classification/alexnet.py b/legacy/image_classification/alexnet.py similarity index 100% rename from v2/image_classification/alexnet.py rename to legacy/image_classification/alexnet.py diff --git a/v2/image_classification/caffe2paddle/README.md b/legacy/image_classification/caffe2paddle/README.md similarity index 100% rename from v2/image_classification/caffe2paddle/README.md rename to legacy/image_classification/caffe2paddle/README.md diff --git a/v2/image_classification/caffe2paddle/caffe2paddle.py b/legacy/image_classification/caffe2paddle/caffe2paddle.py similarity index 100% rename from v2/image_classification/caffe2paddle/caffe2paddle.py rename to legacy/image_classification/caffe2paddle/caffe2paddle.py diff --git 
a/v2/image_classification/googlenet.py b/legacy/image_classification/googlenet.py similarity index 100% rename from v2/image_classification/googlenet.py rename to legacy/image_classification/googlenet.py diff --git a/v2/image_classification/inception_resnet_v2.py b/legacy/image_classification/inception_resnet_v2.py similarity index 100% rename from v2/image_classification/inception_resnet_v2.py rename to legacy/image_classification/inception_resnet_v2.py diff --git a/v2/image_classification/inception_v4.py b/legacy/image_classification/inception_v4.py similarity index 100% rename from v2/image_classification/inception_v4.py rename to legacy/image_classification/inception_v4.py diff --git a/v2/image_classification/infer.py b/legacy/image_classification/infer.py similarity index 100% rename from v2/image_classification/infer.py rename to legacy/image_classification/infer.py diff --git a/v2/image_classification/models/model_download.sh b/legacy/image_classification/models/model_download.sh similarity index 100% rename from v2/image_classification/models/model_download.sh rename to legacy/image_classification/models/model_download.sh diff --git a/v2/image_classification/reader.py b/legacy/image_classification/reader.py similarity index 100% rename from v2/image_classification/reader.py rename to legacy/image_classification/reader.py diff --git a/v2/image_classification/resnet.py b/legacy/image_classification/resnet.py similarity index 100% rename from v2/image_classification/resnet.py rename to legacy/image_classification/resnet.py diff --git a/v2/image_classification/tf2paddle/README.md b/legacy/image_classification/tf2paddle/README.md similarity index 100% rename from v2/image_classification/tf2paddle/README.md rename to legacy/image_classification/tf2paddle/README.md diff --git a/v2/image_classification/tf2paddle/tf2paddle.py b/legacy/image_classification/tf2paddle/tf2paddle.py similarity index 100% rename from v2/image_classification/tf2paddle/tf2paddle.py rename to legacy/image_classification/tf2paddle/tf2paddle.py diff --git a/v2/image_classification/train.py b/legacy/image_classification/train.py similarity index 100% rename from v2/image_classification/train.py rename to legacy/image_classification/train.py diff --git a/v2/image_classification/vgg.py b/legacy/image_classification/vgg.py similarity index 100% rename from v2/image_classification/vgg.py rename to legacy/image_classification/vgg.py diff --git a/v2/image_classification/xception.py b/legacy/image_classification/xception.py similarity index 100% rename from v2/image_classification/xception.py rename to legacy/image_classification/xception.py diff --git a/v2/ltr/README.md b/legacy/ltr/README.md similarity index 100% rename from v2/ltr/README.md rename to legacy/ltr/README.md diff --git a/v2/ltr/README_en.md b/legacy/ltr/README_en.md similarity index 100% rename from v2/ltr/README_en.md rename to legacy/ltr/README_en.md diff --git a/v2/ltr/images/LambdaRank_EN.png b/legacy/ltr/images/LambdaRank_EN.png similarity index 100% rename from v2/ltr/images/LambdaRank_EN.png rename to legacy/ltr/images/LambdaRank_EN.png diff --git a/v2/ltr/images/lambdarank.jpg b/legacy/ltr/images/lambdarank.jpg similarity index 100% rename from v2/ltr/images/lambdarank.jpg rename to legacy/ltr/images/lambdarank.jpg diff --git a/v2/ltr/images/learning_to_rank.jpg b/legacy/ltr/images/learning_to_rank.jpg similarity index 100% rename from v2/ltr/images/learning_to_rank.jpg rename to legacy/ltr/images/learning_to_rank.jpg diff --git 
a/v2/ltr/images/ranknet.jpg b/legacy/ltr/images/ranknet.jpg similarity index 100% rename from v2/ltr/images/ranknet.jpg rename to legacy/ltr/images/ranknet.jpg diff --git a/v2/ltr/images/ranknet_en.png b/legacy/ltr/images/ranknet_en.png similarity index 100% rename from v2/ltr/images/ranknet_en.png rename to legacy/ltr/images/ranknet_en.png diff --git a/v2/ltr/images/search_engine_example.png b/legacy/ltr/images/search_engine_example.png similarity index 100% rename from v2/ltr/images/search_engine_example.png rename to legacy/ltr/images/search_engine_example.png diff --git a/v2/ltr/infer.py b/legacy/ltr/infer.py similarity index 100% rename from v2/ltr/infer.py rename to legacy/ltr/infer.py diff --git a/v2/ltr/lambda_rank.py b/legacy/ltr/lambda_rank.py similarity index 100% rename from v2/ltr/lambda_rank.py rename to legacy/ltr/lambda_rank.py diff --git a/v2/ltr/ranknet.py b/legacy/ltr/ranknet.py similarity index 100% rename from v2/ltr/ranknet.py rename to legacy/ltr/ranknet.py diff --git a/v2/ltr/train.py b/legacy/ltr/train.py similarity index 100% rename from v2/ltr/train.py rename to legacy/ltr/train.py diff --git a/v2/mt_with_external_memory/README.md b/legacy/mt_with_external_memory/README.md similarity index 100% rename from v2/mt_with_external_memory/README.md rename to legacy/mt_with_external_memory/README.md diff --git a/v2/mt_with_external_memory/data_utils.py b/legacy/mt_with_external_memory/data_utils.py similarity index 100% rename from v2/mt_with_external_memory/data_utils.py rename to legacy/mt_with_external_memory/data_utils.py diff --git a/v2/mt_with_external_memory/external_memory.py b/legacy/mt_with_external_memory/external_memory.py similarity index 100% rename from v2/mt_with_external_memory/external_memory.py rename to legacy/mt_with_external_memory/external_memory.py diff --git a/v2/mt_with_external_memory/image/lstm_c_state.png b/legacy/mt_with_external_memory/image/lstm_c_state.png similarity index 100% rename from v2/mt_with_external_memory/image/lstm_c_state.png rename to legacy/mt_with_external_memory/image/lstm_c_state.png diff --git a/v2/mt_with_external_memory/image/memory_enhanced_decoder.png b/legacy/mt_with_external_memory/image/memory_enhanced_decoder.png similarity index 100% rename from v2/mt_with_external_memory/image/memory_enhanced_decoder.png rename to legacy/mt_with_external_memory/image/memory_enhanced_decoder.png diff --git a/v2/mt_with_external_memory/image/neural_turing_machine_arch.png b/legacy/mt_with_external_memory/image/neural_turing_machine_arch.png similarity index 100% rename from v2/mt_with_external_memory/image/neural_turing_machine_arch.png rename to legacy/mt_with_external_memory/image/neural_turing_machine_arch.png diff --git a/v2/mt_with_external_memory/image/turing_machine_cartoon.gif b/legacy/mt_with_external_memory/image/turing_machine_cartoon.gif similarity index 100% rename from v2/mt_with_external_memory/image/turing_machine_cartoon.gif rename to legacy/mt_with_external_memory/image/turing_machine_cartoon.gif diff --git a/v2/mt_with_external_memory/infer.py b/legacy/mt_with_external_memory/infer.py similarity index 100% rename from v2/mt_with_external_memory/infer.py rename to legacy/mt_with_external_memory/infer.py diff --git a/v2/mt_with_external_memory/model.py b/legacy/mt_with_external_memory/model.py similarity index 100% rename from v2/mt_with_external_memory/model.py rename to legacy/mt_with_external_memory/model.py diff --git a/v2/mt_with_external_memory/train.py b/legacy/mt_with_external_memory/train.py similarity 
index 100% rename from v2/mt_with_external_memory/train.py rename to legacy/mt_with_external_memory/train.py diff --git a/v2/nce_cost/.gitignore b/legacy/nce_cost/.gitignore similarity index 100% rename from v2/nce_cost/.gitignore rename to legacy/nce_cost/.gitignore diff --git a/v2/nce_cost/README.md b/legacy/nce_cost/README.md similarity index 100% rename from v2/nce_cost/README.md rename to legacy/nce_cost/README.md diff --git a/v2/nce_cost/images/network_conf.png b/legacy/nce_cost/images/network_conf.png similarity index 100% rename from v2/nce_cost/images/network_conf.png rename to legacy/nce_cost/images/network_conf.png diff --git a/v2/nce_cost/infer.py b/legacy/nce_cost/infer.py similarity index 100% rename from v2/nce_cost/infer.py rename to legacy/nce_cost/infer.py diff --git a/v2/nce_cost/network_conf.py b/legacy/nce_cost/network_conf.py similarity index 100% rename from v2/nce_cost/network_conf.py rename to legacy/nce_cost/network_conf.py diff --git a/v2/nce_cost/train.py b/legacy/nce_cost/train.py similarity index 100% rename from v2/nce_cost/train.py rename to legacy/nce_cost/train.py diff --git a/v2/nested_sequence/README.md b/legacy/nested_sequence/README.md similarity index 100% rename from v2/nested_sequence/README.md rename to legacy/nested_sequence/README.md diff --git a/v2/nested_sequence/README_en.md b/legacy/nested_sequence/README_en.md similarity index 100% rename from v2/nested_sequence/README_en.md rename to legacy/nested_sequence/README_en.md diff --git a/v2/nested_sequence/text_classification/.gitignore b/legacy/nested_sequence/text_classification/.gitignore similarity index 100% rename from v2/nested_sequence/text_classification/.gitignore rename to legacy/nested_sequence/text_classification/.gitignore diff --git a/v2/nested_sequence/text_classification/README.md b/legacy/nested_sequence/text_classification/README.md similarity index 100% rename from v2/nested_sequence/text_classification/README.md rename to legacy/nested_sequence/text_classification/README.md diff --git a/v2/nested_sequence/text_classification/README_en.md b/legacy/nested_sequence/text_classification/README_en.md similarity index 100% rename from v2/nested_sequence/text_classification/README_en.md rename to legacy/nested_sequence/text_classification/README_en.md diff --git a/v2/nested_sequence/text_classification/config.py b/legacy/nested_sequence/text_classification/config.py similarity index 100% rename from v2/nested_sequence/text_classification/config.py rename to legacy/nested_sequence/text_classification/config.py diff --git a/v2/nested_sequence/text_classification/data/infer.txt b/legacy/nested_sequence/text_classification/data/infer.txt similarity index 100% rename from v2/nested_sequence/text_classification/data/infer.txt rename to legacy/nested_sequence/text_classification/data/infer.txt diff --git a/v2/nested_sequence/text_classification/data/test_data/test.txt b/legacy/nested_sequence/text_classification/data/test_data/test.txt similarity index 100% rename from v2/nested_sequence/text_classification/data/test_data/test.txt rename to legacy/nested_sequence/text_classification/data/test_data/test.txt diff --git a/v2/nested_sequence/text_classification/data/train_data/train.txt b/legacy/nested_sequence/text_classification/data/train_data/train.txt similarity index 100% rename from v2/nested_sequence/text_classification/data/train_data/train.txt rename to legacy/nested_sequence/text_classification/data/train_data/train.txt diff --git 
a/v2/nested_sequence/text_classification/images/model.jpg b/legacy/nested_sequence/text_classification/images/model.jpg
similarity index 100%
rename from v2/nested_sequence/text_classification/images/model.jpg
rename to legacy/nested_sequence/text_classification/images/model.jpg
diff --git a/v2/nested_sequence/text_classification/infer.py b/legacy/nested_sequence/text_classification/infer.py
similarity index 100%
rename from v2/nested_sequence/text_classification/infer.py
rename to legacy/nested_sequence/text_classification/infer.py
diff --git a/v2/nested_sequence/text_classification/network_conf.py b/legacy/nested_sequence/text_classification/network_conf.py
similarity index 100%
rename from v2/nested_sequence/text_classification/network_conf.py
rename to legacy/nested_sequence/text_classification/network_conf.py
diff --git a/v2/nested_sequence/text_classification/reader.py b/legacy/nested_sequence/text_classification/reader.py
similarity index 100%
rename from v2/nested_sequence/text_classification/reader.py
rename to legacy/nested_sequence/text_classification/reader.py
diff --git a/v2/nested_sequence/text_classification/requirements.txt b/legacy/nested_sequence/text_classification/requirements.txt
similarity index 100%
rename from v2/nested_sequence/text_classification/requirements.txt
rename to legacy/nested_sequence/text_classification/requirements.txt
diff --git a/v2/nested_sequence/text_classification/train.py b/legacy/nested_sequence/text_classification/train.py
similarity index 100%
rename from v2/nested_sequence/text_classification/train.py
rename to legacy/nested_sequence/text_classification/train.py
diff --git a/v2/nested_sequence/text_classification/utils.py b/legacy/nested_sequence/text_classification/utils.py
similarity index 100%
rename from v2/nested_sequence/text_classification/utils.py
rename to legacy/nested_sequence/text_classification/utils.py
diff --git a/v2/neural_qa/.gitignore b/legacy/neural_qa/.gitignore
similarity index 100%
rename from v2/neural_qa/.gitignore
rename to legacy/neural_qa/.gitignore
diff --git a/v2/neural_qa/README.md b/legacy/neural_qa/README.md
similarity index 100%
rename from v2/neural_qa/README.md
rename to legacy/neural_qa/README.md
diff --git a/v2/neural_qa/config.py b/legacy/neural_qa/config.py
similarity index 100%
rename from v2/neural_qa/config.py
rename to legacy/neural_qa/config.py
diff --git a/v2/neural_qa/infer.py b/legacy/neural_qa/infer.py
similarity index 100%
rename from v2/neural_qa/infer.py
rename to legacy/neural_qa/infer.py
diff --git a/v2/neural_qa/network.py b/legacy/neural_qa/network.py
similarity index 100%
rename from v2/neural_qa/network.py
rename to legacy/neural_qa/network.py
diff --git a/v2/neural_qa/pre-trained-models/download-models.sh b/legacy/neural_qa/pre-trained-models/download-models.sh
similarity index 100%
rename from v2/neural_qa/pre-trained-models/download-models.sh
rename to legacy/neural_qa/pre-trained-models/download-models.sh
diff --git a/v2/neural_qa/pre-trained-models/neural_seq_qa.pre-trained-models.2017-10-27.tar.gz.md5 b/legacy/neural_qa/pre-trained-models/neural_seq_qa.pre-trained-models.2017-10-27.tar.gz.md5
similarity index 100%
rename from v2/neural_qa/pre-trained-models/neural_seq_qa.pre-trained-models.2017-10-27.tar.gz.md5
rename to legacy/neural_qa/pre-trained-models/neural_seq_qa.pre-trained-models.2017-10-27.tar.gz.md5
diff --git a/v2/neural_qa/reader.py b/legacy/neural_qa/reader.py
similarity index 100%
rename from v2/neural_qa/reader.py
rename to legacy/neural_qa/reader.py
diff --git a/v2/neural_qa/test/test_reader.py b/legacy/neural_qa/test/test_reader.py
similarity index 100%
rename from v2/neural_qa/test/test_reader.py
rename to legacy/neural_qa/test/test_reader.py
diff --git a/v2/neural_qa/test/trn_data.gz b/legacy/neural_qa/test/trn_data.gz
similarity index 100%
rename from v2/neural_qa/test/trn_data.gz
rename to legacy/neural_qa/test/trn_data.gz
diff --git a/v2/neural_qa/train.py b/legacy/neural_qa/train.py
similarity index 100%
rename from v2/neural_qa/train.py
rename to legacy/neural_qa/train.py
diff --git a/v2/neural_qa/utils.py b/legacy/neural_qa/utils.py
similarity index 100%
rename from v2/neural_qa/utils.py
rename to legacy/neural_qa/utils.py
diff --git a/v2/neural_qa/val_and_test.py b/legacy/neural_qa/val_and_test.py
similarity index 100%
rename from v2/neural_qa/val_and_test.py
rename to legacy/neural_qa/val_and_test.py
diff --git a/v2/nmt_without_attention/README.cn.md b/legacy/nmt_without_attention/README.cn.md
similarity index 100%
rename from v2/nmt_without_attention/README.cn.md
rename to legacy/nmt_without_attention/README.cn.md
diff --git a/v2/nmt_without_attention/README.md b/legacy/nmt_without_attention/README.md
similarity index 100%
rename from v2/nmt_without_attention/README.md
rename to legacy/nmt_without_attention/README.md
diff --git a/v2/nmt_without_attention/generate.py b/legacy/nmt_without_attention/generate.py
similarity index 100%
rename from v2/nmt_without_attention/generate.py
rename to legacy/nmt_without_attention/generate.py
diff --git a/v2/nmt_without_attention/images/bidirectional-encoder.png b/legacy/nmt_without_attention/images/bidirectional-encoder.png
similarity index 100%
rename from v2/nmt_without_attention/images/bidirectional-encoder.png
rename to legacy/nmt_without_attention/images/bidirectional-encoder.png
diff --git a/v2/nmt_without_attention/images/encoder-decoder.png b/legacy/nmt_without_attention/images/encoder-decoder.png
similarity index 100%
rename from v2/nmt_without_attention/images/encoder-decoder.png
rename to legacy/nmt_without_attention/images/encoder-decoder.png
diff --git a/v2/nmt_without_attention/images/gru.png b/legacy/nmt_without_attention/images/gru.png
similarity index 100%
rename from v2/nmt_without_attention/images/gru.png
rename to legacy/nmt_without_attention/images/gru.png
diff --git a/v2/nmt_without_attention/network_conf.py b/legacy/nmt_without_attention/network_conf.py
similarity index 100%
rename from v2/nmt_without_attention/network_conf.py
rename to legacy/nmt_without_attention/network_conf.py
diff --git a/v2/nmt_without_attention/train.py b/legacy/nmt_without_attention/train.py
similarity index 100%
rename from v2/nmt_without_attention/train.py
rename to legacy/nmt_without_attention/train.py
diff --git a/v2/scene_text_recognition/README.md b/legacy/scene_text_recognition/README.md
similarity index 100%
rename from v2/scene_text_recognition/README.md
rename to legacy/scene_text_recognition/README.md
diff --git a/v2/scene_text_recognition/config.py b/legacy/scene_text_recognition/config.py
similarity index 100%
rename from v2/scene_text_recognition/config.py
rename to legacy/scene_text_recognition/config.py
diff --git a/v2/scene_text_recognition/decoder.py b/legacy/scene_text_recognition/decoder.py
similarity index 100%
rename from v2/scene_text_recognition/decoder.py
rename to legacy/scene_text_recognition/decoder.py
diff --git a/v2/scene_text_recognition/images/503.jpg b/legacy/scene_text_recognition/images/503.jpg
similarity index 100%
rename from v2/scene_text_recognition/images/503.jpg
rename to legacy/scene_text_recognition/images/503.jpg
diff --git a/v2/scene_text_recognition/images/504.jpg b/legacy/scene_text_recognition/images/504.jpg
similarity index 100%
rename from v2/scene_text_recognition/images/504.jpg
rename to legacy/scene_text_recognition/images/504.jpg
diff --git a/v2/scene_text_recognition/images/505.jpg b/legacy/scene_text_recognition/images/505.jpg
similarity index 100%
rename from v2/scene_text_recognition/images/505.jpg
rename to legacy/scene_text_recognition/images/505.jpg
diff --git a/v2/scene_text_recognition/images/ctc.png b/legacy/scene_text_recognition/images/ctc.png
similarity index 100%
rename from v2/scene_text_recognition/images/ctc.png
rename to legacy/scene_text_recognition/images/ctc.png
diff --git a/v2/scene_text_recognition/images/feature_vector.png b/legacy/scene_text_recognition/images/feature_vector.png
similarity index 100%
rename from v2/scene_text_recognition/images/feature_vector.png
rename to legacy/scene_text_recognition/images/feature_vector.png
diff --git a/v2/scene_text_recognition/images/transcription.png b/legacy/scene_text_recognition/images/transcription.png
similarity index 100%
rename from v2/scene_text_recognition/images/transcription.png
rename to legacy/scene_text_recognition/images/transcription.png
diff --git a/v2/scene_text_recognition/infer.py b/legacy/scene_text_recognition/infer.py
similarity index 100%
rename from v2/scene_text_recognition/infer.py
rename to legacy/scene_text_recognition/infer.py
diff --git a/v2/scene_text_recognition/network_conf.py b/legacy/scene_text_recognition/network_conf.py
similarity index 100%
rename from v2/scene_text_recognition/network_conf.py
rename to legacy/scene_text_recognition/network_conf.py
diff --git a/v2/scene_text_recognition/reader.py b/legacy/scene_text_recognition/reader.py
similarity index 100%
rename from v2/scene_text_recognition/reader.py
rename to legacy/scene_text_recognition/reader.py
diff --git a/v2/scene_text_recognition/requirements.txt b/legacy/scene_text_recognition/requirements.txt
similarity index 100%
rename from v2/scene_text_recognition/requirements.txt
rename to legacy/scene_text_recognition/requirements.txt
diff --git a/v2/scene_text_recognition/train.py b/legacy/scene_text_recognition/train.py
similarity index 100%
rename from v2/scene_text_recognition/train.py
rename to legacy/scene_text_recognition/train.py
diff --git a/v2/scene_text_recognition/utils.py b/legacy/scene_text_recognition/utils.py
similarity index 100%
rename from v2/scene_text_recognition/utils.py
rename to legacy/scene_text_recognition/utils.py
diff --git a/v2/scheduled_sampling/README.md b/legacy/scheduled_sampling/README.md
similarity index 100%
rename from v2/scheduled_sampling/README.md
rename to legacy/scheduled_sampling/README.md
diff --git a/v2/scheduled_sampling/README_en.md b/legacy/scheduled_sampling/README_en.md
similarity index 100%
rename from v2/scheduled_sampling/README_en.md
rename to legacy/scheduled_sampling/README_en.md
diff --git a/v2/scheduled_sampling/generate.py b/legacy/scheduled_sampling/generate.py
similarity index 100%
rename from v2/scheduled_sampling/generate.py
rename to legacy/scheduled_sampling/generate.py
diff --git a/v2/scheduled_sampling/images/Scheduled_Sampling.jpg b/legacy/scheduled_sampling/images/Scheduled_Sampling.jpg
similarity index 100%
rename from v2/scheduled_sampling/images/Scheduled_Sampling.jpg
rename to legacy/scheduled_sampling/images/Scheduled_Sampling.jpg
diff --git a/v2/scheduled_sampling/images/decay.jpg b/legacy/scheduled_sampling/images/decay.jpg
similarity index 100%
rename from v2/scheduled_sampling/images/decay.jpg
rename to legacy/scheduled_sampling/images/decay.jpg
diff --git a/v2/scheduled_sampling/network_conf.py b/legacy/scheduled_sampling/network_conf.py
similarity index 100%
rename from v2/scheduled_sampling/network_conf.py
rename to legacy/scheduled_sampling/network_conf.py
diff --git a/v2/scheduled_sampling/reader.py b/legacy/scheduled_sampling/reader.py
similarity index 100%
rename from v2/scheduled_sampling/reader.py
rename to legacy/scheduled_sampling/reader.py
diff --git a/v2/scheduled_sampling/train.py b/legacy/scheduled_sampling/train.py
similarity index 100%
rename from v2/scheduled_sampling/train.py
rename to legacy/scheduled_sampling/train.py
diff --git a/v2/scheduled_sampling/utils.py b/legacy/scheduled_sampling/utils.py
similarity index 100%
rename from v2/scheduled_sampling/utils.py
rename to legacy/scheduled_sampling/utils.py
diff --git a/v2/sequence_tagging_for_ner/.gitignore b/legacy/sequence_tagging_for_ner/.gitignore
similarity index 100%
rename from v2/sequence_tagging_for_ner/.gitignore
rename to legacy/sequence_tagging_for_ner/.gitignore
diff --git a/v2/sequence_tagging_for_ner/README.md b/legacy/sequence_tagging_for_ner/README.md
similarity index 100%
rename from v2/sequence_tagging_for_ner/README.md
rename to legacy/sequence_tagging_for_ner/README.md
diff --git a/v2/sequence_tagging_for_ner/data/download.sh b/legacy/sequence_tagging_for_ner/data/download.sh
similarity index 100%
rename from v2/sequence_tagging_for_ner/data/download.sh
rename to legacy/sequence_tagging_for_ner/data/download.sh
diff --git a/v2/sequence_tagging_for_ner/data/target.txt b/legacy/sequence_tagging_for_ner/data/target.txt
similarity index 100%
rename from v2/sequence_tagging_for_ner/data/target.txt
rename to legacy/sequence_tagging_for_ner/data/target.txt
diff --git a/v2/sequence_tagging_for_ner/data/test b/legacy/sequence_tagging_for_ner/data/test
similarity index 100%
rename from v2/sequence_tagging_for_ner/data/test
rename to legacy/sequence_tagging_for_ner/data/test
diff --git a/v2/sequence_tagging_for_ner/data/train b/legacy/sequence_tagging_for_ner/data/train
similarity index 100%
rename from v2/sequence_tagging_for_ner/data/train
rename to legacy/sequence_tagging_for_ner/data/train
diff --git a/v2/sequence_tagging_for_ner/data/vocab.txt b/legacy/sequence_tagging_for_ner/data/vocab.txt
similarity index 100%
rename from v2/sequence_tagging_for_ner/data/vocab.txt
rename to legacy/sequence_tagging_for_ner/data/vocab.txt
diff --git a/v2/sequence_tagging_for_ner/images/BIO tag example.png b/legacy/sequence_tagging_for_ner/images/BIO tag example.png
similarity index 100%
rename from v2/sequence_tagging_for_ner/images/BIO tag example.png
rename to legacy/sequence_tagging_for_ner/images/BIO tag example.png
diff --git a/v2/sequence_tagging_for_ner/images/ner_label_ins.png b/legacy/sequence_tagging_for_ner/images/ner_label_ins.png
similarity index 100%
rename from v2/sequence_tagging_for_ner/images/ner_label_ins.png
rename to legacy/sequence_tagging_for_ner/images/ner_label_ins.png
diff --git a/v2/sequence_tagging_for_ner/images/ner_model_en.png b/legacy/sequence_tagging_for_ner/images/ner_model_en.png
similarity index 100%
rename from v2/sequence_tagging_for_ner/images/ner_model_en.png
rename to legacy/sequence_tagging_for_ner/images/ner_model_en.png
diff --git a/v2/sequence_tagging_for_ner/images/ner_network.png b/legacy/sequence_tagging_for_ner/images/ner_network.png
similarity index 100%
rename from v2/sequence_tagging_for_ner/images/ner_network.png
rename to legacy/sequence_tagging_for_ner/images/ner_network.png
diff --git a/v2/sequence_tagging_for_ner/infer.py b/legacy/sequence_tagging_for_ner/infer.py
similarity index 100%
rename from v2/sequence_tagging_for_ner/infer.py
rename to legacy/sequence_tagging_for_ner/infer.py
diff --git a/v2/sequence_tagging_for_ner/network_conf.py b/legacy/sequence_tagging_for_ner/network_conf.py
similarity index 100%
rename from v2/sequence_tagging_for_ner/network_conf.py
rename to legacy/sequence_tagging_for_ner/network_conf.py
diff --git a/v2/sequence_tagging_for_ner/reader.py b/legacy/sequence_tagging_for_ner/reader.py
similarity index 100%
rename from v2/sequence_tagging_for_ner/reader.py
rename to legacy/sequence_tagging_for_ner/reader.py
diff --git a/v2/sequence_tagging_for_ner/train.py b/legacy/sequence_tagging_for_ner/train.py
similarity index 100%
rename from v2/sequence_tagging_for_ner/train.py
rename to legacy/sequence_tagging_for_ner/train.py
diff --git a/v2/sequence_tagging_for_ner/utils.py b/legacy/sequence_tagging_for_ner/utils.py
similarity index 100%
rename from v2/sequence_tagging_for_ner/utils.py
rename to legacy/sequence_tagging_for_ner/utils.py
diff --git a/v2/ssd/README.cn.md b/legacy/ssd/README.cn.md
similarity index 100%
rename from v2/ssd/README.cn.md
rename to legacy/ssd/README.cn.md
diff --git a/v2/ssd/README.md b/legacy/ssd/README.md
similarity index 100%
rename from v2/ssd/README.md
rename to legacy/ssd/README.md
diff --git a/v2/ssd/config/__init__.py b/legacy/ssd/config/__init__.py
similarity index 100%
rename from v2/ssd/config/__init__.py
rename to legacy/ssd/config/__init__.py
diff --git a/v2/ssd/config/pascal_voc_conf.py b/legacy/ssd/config/pascal_voc_conf.py
similarity index 100%
rename from v2/ssd/config/pascal_voc_conf.py
rename to legacy/ssd/config/pascal_voc_conf.py
diff --git a/v2/ssd/data/label_list b/legacy/ssd/data/label_list
similarity index 100%
rename from v2/ssd/data/label_list
rename to legacy/ssd/data/label_list
diff --git a/v2/ssd/data/prepare_voc_data.py b/legacy/ssd/data/prepare_voc_data.py
similarity index 100%
rename from v2/ssd/data/prepare_voc_data.py
rename to legacy/ssd/data/prepare_voc_data.py
diff --git a/v2/ssd/data_provider.py b/legacy/ssd/data_provider.py
similarity index 100%
rename from v2/ssd/data_provider.py
rename to legacy/ssd/data_provider.py
diff --git a/v2/ssd/eval.py b/legacy/ssd/eval.py
similarity index 100%
rename from v2/ssd/eval.py
rename to legacy/ssd/eval.py
diff --git a/v2/ssd/image_util.py b/legacy/ssd/image_util.py
similarity index 100%
rename from v2/ssd/image_util.py
rename to legacy/ssd/image_util.py
diff --git a/v2/ssd/images/SSD300x300_map.png b/legacy/ssd/images/SSD300x300_map.png
similarity index 100%
rename from v2/ssd/images/SSD300x300_map.png
rename to legacy/ssd/images/SSD300x300_map.png
diff --git a/v2/ssd/images/ssd_network.png b/legacy/ssd/images/ssd_network.png
similarity index 100%
rename from v2/ssd/images/ssd_network.png
rename to legacy/ssd/images/ssd_network.png
diff --git a/v2/ssd/images/vis_1.jpg b/legacy/ssd/images/vis_1.jpg
similarity index 100%
rename from v2/ssd/images/vis_1.jpg
rename to legacy/ssd/images/vis_1.jpg
diff --git a/v2/ssd/images/vis_2.jpg b/legacy/ssd/images/vis_2.jpg
similarity index 100%
rename from v2/ssd/images/vis_2.jpg
rename to legacy/ssd/images/vis_2.jpg
diff --git a/v2/ssd/images/vis_3.jpg b/legacy/ssd/images/vis_3.jpg
similarity index 100%
rename from v2/ssd/images/vis_3.jpg
rename to legacy/ssd/images/vis_3.jpg
diff --git a/v2/ssd/images/vis_4.jpg b/legacy/ssd/images/vis_4.jpg
similarity index 100%
rename from v2/ssd/images/vis_4.jpg
rename to legacy/ssd/images/vis_4.jpg
diff --git a/v2/ssd/infer.py b/legacy/ssd/infer.py
similarity index 100%
rename from v2/ssd/infer.py
rename to legacy/ssd/infer.py
diff --git a/v2/ssd/train.py b/legacy/ssd/train.py
similarity index 100%
rename from v2/ssd/train.py
rename to legacy/ssd/train.py
diff --git a/v2/ssd/vgg_ssd_net.py b/legacy/ssd/vgg_ssd_net.py
similarity index 100%
rename from v2/ssd/vgg_ssd_net.py
rename to legacy/ssd/vgg_ssd_net.py
diff --git a/v2/ssd/visual.py b/legacy/ssd/visual.py
similarity index 100%
rename from v2/ssd/visual.py
rename to legacy/ssd/visual.py
diff --git a/v2/text_classification/.gitignore b/legacy/text_classification/.gitignore
similarity index 100%
rename from v2/text_classification/.gitignore
rename to legacy/text_classification/.gitignore
diff --git a/v2/text_classification/README.md b/legacy/text_classification/README.md
similarity index 100%
rename from v2/text_classification/README.md
rename to legacy/text_classification/README.md
diff --git a/v2/text_classification/images/cnn_net.png b/legacy/text_classification/images/cnn_net.png
similarity index 100%
rename from v2/text_classification/images/cnn_net.png
rename to legacy/text_classification/images/cnn_net.png
diff --git a/v2/text_classification/images/dnn_net.png b/legacy/text_classification/images/dnn_net.png
similarity index 100%
rename from v2/text_classification/images/dnn_net.png
rename to legacy/text_classification/images/dnn_net.png
diff --git a/v2/text_classification/infer.py b/legacy/text_classification/infer.py
similarity index 100%
rename from v2/text_classification/infer.py
rename to legacy/text_classification/infer.py
diff --git a/v2/text_classification/network_conf.py b/legacy/text_classification/network_conf.py
similarity index 100%
rename from v2/text_classification/network_conf.py
rename to legacy/text_classification/network_conf.py
diff --git a/v2/text_classification/reader.py b/legacy/text_classification/reader.py
similarity index 100%
rename from v2/text_classification/reader.py
rename to legacy/text_classification/reader.py
diff --git a/v2/text_classification/run.sh b/legacy/text_classification/run.sh
similarity index 100%
rename from v2/text_classification/run.sh
rename to legacy/text_classification/run.sh
diff --git a/v2/text_classification/train.py b/legacy/text_classification/train.py
similarity index 100%
rename from v2/text_classification/train.py
rename to legacy/text_classification/train.py
diff --git a/v2/text_classification/utils.py b/legacy/text_classification/utils.py
similarity index 100%
rename from v2/text_classification/utils.py
rename to legacy/text_classification/utils.py
diff --git a/v2/youtube_recall/README.cn.md b/legacy/youtube_recall/README.cn.md
similarity index 100%
rename from v2/youtube_recall/README.cn.md
rename to legacy/youtube_recall/README.cn.md
diff --git a/v2/youtube_recall/README.md b/legacy/youtube_recall/README.md
similarity index 100%
rename from v2/youtube_recall/README.md
rename to legacy/youtube_recall/README.md
diff --git a/v2/youtube_recall/data/data.tar b/legacy/youtube_recall/data/data.tar
similarity index 100%
rename from v2/youtube_recall/data/data.tar
rename to legacy/youtube_recall/data/data.tar
diff --git a/v2/youtube_recall/data_processor.py b/legacy/youtube_recall/data_processor.py
similarity index 100%
rename from v2/youtube_recall/data_processor.py
rename to legacy/youtube_recall/data_processor.py
diff --git a/v2/youtube_recall/images/model_network.png b/legacy/youtube_recall/images/model_network.png
similarity index 100%
rename from v2/youtube_recall/images/model_network.png
rename to legacy/youtube_recall/images/model_network.png
diff --git a/v2/youtube_recall/images/recommendation_system.png b/legacy/youtube_recall/images/recommendation_system.png
similarity index 100%
rename from v2/youtube_recall/images/recommendation_system.png
rename to legacy/youtube_recall/images/recommendation_system.png
diff --git a/v2/youtube_recall/infer.py b/legacy/youtube_recall/infer.py
similarity index 100%
rename from v2/youtube_recall/infer.py
rename to legacy/youtube_recall/infer.py
diff --git a/v2/youtube_recall/infer_user.py b/legacy/youtube_recall/infer_user.py
similarity index 100%
rename from v2/youtube_recall/infer_user.py
rename to legacy/youtube_recall/infer_user.py
diff --git a/v2/youtube_recall/item_vector.py b/legacy/youtube_recall/item_vector.py
similarity index 100%
rename from v2/youtube_recall/item_vector.py
rename to legacy/youtube_recall/item_vector.py
diff --git a/v2/youtube_recall/network_conf.py b/legacy/youtube_recall/network_conf.py
similarity index 100%
rename from v2/youtube_recall/network_conf.py
rename to legacy/youtube_recall/network_conf.py
diff --git a/v2/youtube_recall/reader.py b/legacy/youtube_recall/reader.py
similarity index 100%
rename from v2/youtube_recall/reader.py
rename to legacy/youtube_recall/reader.py
diff --git a/v2/youtube_recall/train.py b/legacy/youtube_recall/train.py
similarity index 100%
rename from v2/youtube_recall/train.py
rename to legacy/youtube_recall/train.py
diff --git a/v2/youtube_recall/user_vector.py b/legacy/youtube_recall/user_vector.py
similarity index 100%
rename from v2/youtube_recall/user_vector.py
rename to legacy/youtube_recall/user_vector.py
diff --git a/v2/youtube_recall/utils.py b/legacy/youtube_recall/utils.py
similarity index 100%
rename from v2/youtube_recall/utils.py
rename to legacy/youtube_recall/utils.py