From 46f6ec8c55f0b4991afa52a32dcc86bbe60f84b9 Mon Sep 17 00:00:00 2001
From: guofei <52460041+gfwm2013@users.noreply.github.com>
Date: Sat, 12 Oct 2019 17:00:51 +0800
Subject: [PATCH] Modify English documents (#20452) (#20576)

---
 paddle/fluid/API.spec            |  24 ++--
 python/paddle/batch.py           |  43 +++++--
 python/paddle/fluid/framework.py |  17 +--
 python/paddle/fluid/io.py        | 188 +++++++++++++++++++------
 python/paddle/fluid/layers/io.py |  85 ++++++++------
 python/paddle/fluid/reader.py    | 118 ++++++++++++-------
 6 files changed, 301 insertions(+), 174 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 7b7347bdaa2..d0738633780 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -71,24 +71,24 @@ paddle.fluid.BuildStrategy.ReduceStrategy.__init__ __init__(self: paddle.fluid.c
 paddle.fluid.BuildStrategy.__init__ __init__(self: paddle.fluid.core_avx.ParallelExecutor.BuildStrategy) -> None
 paddle.fluid.gradients (ArgSpec(args=['targets', 'inputs', 'target_gradients', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'e2097e1e0ed84ae44951437bfe269a1b'))
 paddle.fluid.io.save_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '9ff7159eef501e9dfaf520073e681c10'))
-paddle.fluid.io.save_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '046d7c43d67e08c2660bb3bd7e081015'))
-paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'ffcee38044975c29f2ab2fec0576f963'))
+paddle.fluid.io.save_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'a03d0de7594d311103671b7275f1b464'))
+paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '3cd8facbe536a09310e95453914ca322'))
 paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '12dd2c3f29d63f7a920bb1e0a0e8caff'))
 paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'f3f16db75ae076d46608c7e976650cfc'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '1e039084ad3781eb43966581eed48688'))
-paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment', 'program_only'], varargs=None, keywords=None, defaults=(None, None, None, True, False)), ('document', 'fc82bfd137a9b1ab8ebd1651bd35b6e5'))
+paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment', 'program_only'], varargs=None, keywords=None, defaults=(None, None, None, True, False)), ('document', '827797614e194d31ceb5a3a68c46efab'))
 paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '7a863032bf7613dec1c8dd99efbd82e5'))
-paddle.fluid.io.batch (ArgSpec(args=['reader', 'batch_size', 'drop_last'], varargs=None, keywords=None, defaults=(False,)), ('document', 'cf2869b408b39cadadd95206b4e03b39'))
+paddle.fluid.io.batch (ArgSpec(args=['reader', 'batch_size', 'drop_last'], varargs=None, keywords=None, defaults=(False,)), ('document', '16acb4e1215d5fc4386add454e717440'))
 paddle.fluid.io.save (ArgSpec(args=['program', 'model_path'], varargs=None, keywords=None, defaults=None), ('document', 'cef7d50c36b93c02b6d12bcea7d025ce'))
 paddle.fluid.io.load (ArgSpec(args=['program', 'model_path'], varargs=None, keywords=None, defaults=None), ('document', '8d0f200c20f8a4581e1843967230ad45'))
-paddle.fluid.io.PyReader ('paddle.fluid.reader.PyReader', ('document', 'b03399246f69cd6fc03b43e87af8bd4e'))
+paddle.fluid.io.PyReader ('paddle.fluid.reader.PyReader', ('document', 'f5875acee86f9f4432933bab40873722'))
 paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable', 'return_list'], varargs=None, keywords=None, defaults=(None, None, True, True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '4364e836e3cb8ab5e68e411b763c50c7'))
-paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', 'efa4c8b90fe6d99dcbda637b70351bb1'))
-paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '6c11980092720de304863de98074a64a'))
+paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '386f969058852594b916be7cc15b8066'))
+paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', 'c3ab4fd82a4560e369adcd23a36c2a7b'))
+paddle.fluid.io.PyReader.decorate_sample_list_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '182443165b1b6a607ca821052b1b9085'))
 paddle.fluid.io.PyReader.next (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '08b2fd1463f3ea99d79d17303988349b'))
-paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '7432197701fdaab1848063860dc0b97e'))
-paddle.fluid.io.PyReader.start (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'a0983fb21a0a51e6a31716009fe9a9c1'))
+paddle.fluid.io.PyReader.reset (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '259e025143f0bb7cfc6d7163bc333679'))
+paddle.fluid.io.PyReader.start (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', 'c91176b919e19be11cbc11ab3d47318e'))
 paddle.fluid.io.DataLoader ('paddle.fluid.reader.DataLoader', ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.io.DataLoader.__init__
 paddle.fluid.io.DataLoader.from_dataset (ArgSpec(args=['dataset', 'places', 'drop_last'], varargs=None, keywords=None, defaults=(True,)), ('document', 'eb8b6d31e1c2ec2ca8ebbb62fcf46557'))
@@ -308,10 +308,10 @@ paddle.fluid.layers.shard_index (ArgSpec(args=['input', 'index_num', 'nshards',
 paddle.fluid.layers.hard_swish (ArgSpec(args=['x', 'threshold', 'scale', 'offset', 'name'], varargs=None, keywords=None, defaults=(6.0, 6.0, 3.0, None)), ('document', 'bd763b9ca99239d624c3cb4626e3627a'))
 paddle.fluid.layers.mse_loss (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', '88b967ef5132567396062d5d654b3064'))
 paddle.fluid.layers.uniform_random (ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=('float32', -1.0, 1.0, 0)), ('document', '34e7c1ff0263baf9551000b6bb3bc47e'))
-paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', '9d7806e31bdf727c1a23b8782a09b545'))
+paddle.fluid.layers.data (ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)), ('document', 'a43c597ac4e1cec20cf193c083d946be'))
 paddle.fluid.layers.read_file (ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None), ('document', 'd5b41c7b2df1b064fbd42dcf435268cd'))
 paddle.fluid.layers.double_buffer (ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '556fa82daf62cbb0fb393f4125daba77'))
-paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', 'd78a1c7344955c5caed8dc13adb7beb6'))
+paddle.fluid.layers.py_reader (ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)), ('document', 'c9c52b0a57e541d751e7a839ad26ee1a'))
 paddle.fluid.layers.create_py_reader_by_data (ArgSpec(args=['capacity', 'feed_list', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, True)), ('document', '1321d4ce89d82f96fcfd5601f816b0f3'))
 paddle.fluid.layers.load (ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,)), ('document', '309f9e5249463e1b207a7347b2a91134'))
 paddle.fluid.layers.create_tensor (ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)), ('document', 'fdc2d964488e99fb0743887454c34e36'))
diff --git a/python/paddle/batch.py b/python/paddle/batch.py
index 00850966073..f6d2d8eb288 100644
--- a/python/paddle/batch.py
+++ b/python/paddle/batch.py
@@ -17,16 +17,39 @@ __all__ = ['batch']
 
 def batch(reader, batch_size, drop_last=False):
     """
-    Create a batched reader.
-
-    :param reader: the data reader to read from.
-    :type reader: callable
-    :param batch_size: size of each mini-batch
-    :type batch_size: int
-    :param drop_last: drop the last batch, if the size of last batch is not equal to batch_size.
-    :type drop_last: bool
-    :return: the batched reader.
-    :rtype: callable
+    This operator creates a batched reader which combines the data from the
+    input reader into batched data.
+
+    Args:
+        reader(generator): the data reader to read from.
+        batch_size(int): size of each mini-batch.
+        drop_last(bool, optional): If set to True, the last batch is dropped when
+            the size of the last batch is not equal to batch_size; if set to False,
+            it is kept. Default: False.
+    Returns:
+        The batched reader.
+
+    Return Type:
+        generator
+
+    Examples:
+        .. code-block:: python
+
+            import paddle.fluid as fluid
+            def reader():
+                for i in range(10):
+                    yield i
+            batch_reader = fluid.io.batch(reader, batch_size=2)
+
+            for data in batch_reader():
+                print(data)
+
+            # Output is
+            # [0, 1]
+            # [2, 3]
+            # [4, 5]
+            # [6, 7]
+            # [8, 9]
     """
 
     def batch_reader():
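The example above only exercises the default drop_last=False. As a complement, a minimal sketch of the drop_last=True behaviour of the same fluid.io.batch API; the output follows the semantics documented in the Args section:

.. code-block:: python

    import paddle.fluid as fluid

    def reader():
        for i in range(10):
            yield i

    # batch_size=3 does not divide the 10 samples evenly, so with
    # drop_last=True the final incomplete batch [9] is discarded.
    batch_reader = fluid.io.batch(reader, batch_size=3, drop_last=True)

    for data in batch_reader():
        print(data)
    # [0, 1, 2]
    # [3, 4, 5]
    # [6, 7, 8]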
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 939f01cb28a..74452c97d44 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -4516,6 +4516,13 @@ def program_guard(main_program, startup_program=None):
     Layer functions in the Python `"with"` block will append operators and
     variables to the new main programs.
 
+    Args:
+        main_program(Program): New main program inside `"with"` statement.
+        startup_program(Program, optional): New startup program inside `"with"`
+            statement. :code:`None` means the startup program is not changed,
+            and default_startup_program is still used.
+            Default: None.
+
     Examples:
         .. code-block:: python
 
@@ -4524,7 +4531,7 @@ def program_guard(main_program, startup_program=None):
             main_program = fluid.Program()
             startup_program = fluid.Program()
             with fluid.program_guard(main_program, startup_program):
-                data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+                data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
                 hidden = fluid.layers.fc(input=data, size=10, act='relu')
 
     Notes: The temporary :code:`Program` can be used if the user does not need
@@ -4538,12 +4545,8 @@ def program_guard(main_program, startup_program=None):
             main_program = fluid.Program()
             # does not care about startup program. Just pass a temporary value.
             with fluid.program_guard(main_program, fluid.Program()):
-                data = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
-
-    Args:
-        main_program(Program): New main program inside `"with"` statement.
-        startup_program(Program): New startup program inside `"with"` statement.
-            None means not changing startup program.
+                data = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+
     """
     if not isinstance(main_program, Program):
         raise TypeError("main_program should be Program")
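A compact sketch of the life cycle implied by the new Args section: variables created under the guard go into the two new programs, and the startup program must be run once before the main program is used (executor usage mirrors the other examples in this patch):

.. code-block:: python

    import paddle.fluid as fluid

    main_program = fluid.Program()
    startup_program = fluid.Program()
    with fluid.program_guard(main_program, startup_program):
        data = fluid.data(name='image', shape=[None, 784], dtype='float32')
        hidden = fluid.layers.fc(input=data, size=10, act='relu')

    exe = fluid.Executor(fluid.CPUPlace())
    # The fc parameters were registered in startup_program by the guard;
    # run it once to initialize them before running main_program.
    exe.run(startup_program)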
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index 1f4220e5191..d57aa0a2489 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -258,32 +258,40 @@ def save_vars(executor,
 
 def save_params(executor, dirname, main_program=None, filename=None):
     """
-    This function filters out all parameters from the give `main_program`
-    and then save them to the folder `dirname` or the file `filename`.
+    This operator saves all parameters from the :code:`main_program` to
+    the folder :code:`dirname` or file :code:`filename`. You can refer to
+    :ref:`api_guide_model_save_reader_en` for more details.
 
-    Use the `dirname` to specify the saving folder. If you would like to
-    save parameters in separate files, set `filename` None; if you would
-    like to save all parameters in a single file, use `filename` to specify
+    Use the :code:`dirname` to specify the saving folder. If you would like to
+    save parameters in separate files, set :code:`filename` to None; if you would
+    like to save all parameters in a single file, use :code:`filename` to specify
     the file name.
 
-    NOTICE: Some variables are not Parameter while they are necessary for
-    training. So you can NOT save and continue your training just by
-    `save_params()` and `load_params()`. Please use `save_persistables()`
-    and `load_persistables()` instead. If you want to save your model for
-    the inference, please use the `save_inference_model` API. You can refer
-    to :ref:`api_guide_model_save_reader_en` for more details.
+    Note:
+        Some variables are not Parameter while they are necessary for
+        training, such as learning rate, global step, etc. So you can NOT save
+        and continue your training just by :ref:`api_fluid_io_save_params`
+        and :ref:`api_fluid_io_load_params`. Please use :ref:`api_fluid_io_save_persistables`
+        and :ref:`api_fluid_io_load_persistables` instead.
+
+        If you want to save your model for inference, please use the
+        :ref:`api_fluid_io_save_inference_model` API. You can refer to
+        :ref:`api_guide_model_save_reader_en` for more details.
 
     Args:
-        executor(Executor): The executor to run for saving parameters.
+        executor(Executor): The executor to run for saving parameters. You can
+            refer to :ref:`api_guide_executor_en` for more details.
         dirname(str): The saving directory path.
-        main_program(Program|None): The program whose parameters will be
-                                    saved. If it is None, the default
-                                    main program will be used automatically.
-                                    Default: None
-        filename(str|None): The file to save all parameters. If you prefer
-                            to save parameters in differnet files, set it
-                            to None.
-                            Default: None
+        main_program(Program, optional): The program whose parameters will be
+            saved. You can refer to :ref:`api_guide_Program_en` for more
+            details. If it is None, the default main program will be used.
+            Default: None
+        filename(str, optional): The file to save all parameters. If you prefer
+            to save parameters in different files, set it to None.
+            Default: None
 
     Returns:
         None
@@ -292,12 +300,21 @@ def save_params(executor, dirname, main_program=None, filename=None):
         .. code-block:: python
 
             import paddle.fluid as fluid
-
+
+            params_path = "./my_paddle_model"
+            image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
+            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
+            predict = fluid.layers.fc(input=image, size=10, act='softmax')
+
+            loss = fluid.layers.cross_entropy(input=predict, label=label)
+            avg_loss = fluid.layers.mean(loss)
+
             exe = fluid.Executor(fluid.CPUPlace())
-            param_path = "./my_paddle_model"
-            prog = fluid.default_main_program()
-            fluid.io.save_params(executor=exe, dirname=param_path,
-                                 main_program=None)
+            exe.run(fluid.default_startup_program())
+            fluid.io.save_params(executor=exe, dirname=params_path)
+            # The parameters (weights and biases) of the fc layer in the network
+            # are going to be saved in separate files in the path "./my_paddle_model"
     """
     save_vars(
         executor,
@@ -491,25 +508,31 @@ def _save_distributed_persistables(executor, dirname, main_program):
 
 def save_persistables(executor, dirname, main_program=None, filename=None):
     """
-    This function filters out all variables with `persistable==True` from the
-    give `main_program` and then saves these variables to the folder `dirname`
-    or file `filename`.
+    This operator saves all persistable variables from :code:`main_program` to
+    the folder :code:`dirname` or file :code:`filename`. You can refer to
+    :ref:`api_guide_model_save_reader_en` for more details.
 
-    The `dirname` is used to specify the folder where persistable variables
+    The :code:`dirname` is used to specify the folder where persistable variables
     are going to be saved. If you would like to save variables in separate
-    files, set `filename` None; if you would like to save all variables in a
-    single file, use `filename` to specify the file name.
+    files, set :code:`filename` to None; if you would like to save all variables in a
+    single file, use :code:`filename` to specify the file name.
 
     Args:
         executor(Executor): The executor to run for saving persistable variables.
-        dirname(str): The directory path.
-        main_program(Program|None): The program whose persistbale variables will
-                                    be saved. If it is None, the default main
-                                    program will be used automatically.
-                                    Default: None
-        filename(str|None): The file to saved all variables. If you prefer to
-                            save variables in differnet files, set it to None.
-                            Default: None
+            You can refer to :ref:`api_guide_executor_en` for more details.
+        dirname(str): The saving directory path.
+        main_program(Program, optional): The program whose persistable variables will
+            be saved. You can refer to :ref:`api_guide_Program_en` for more details.
+            If it is None, the default main program will be used.
+            Default: None.
+        filename(str, optional): The file to save all variables. If you prefer to
+            save variables in different files, set it to None.
+            Default: None.
 
     Returns:
         None
@@ -518,13 +541,22 @@ def save_persistables(executor, dirname, main_program=None, filename=None):
         .. code-block:: python
 
             import paddle.fluid as fluid
-
+
+            dir_path = "./my_paddle_model"
+            file_name = "persistables"
+            image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
+            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+            feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
+
+            predict = fluid.layers.fc(input=image, size=10, act='softmax')
+            loss = fluid.layers.cross_entropy(input=predict, label=label)
+            avg_loss = fluid.layers.mean(loss)
             exe = fluid.Executor(fluid.CPUPlace())
-            param_path = "./my_paddle_model"
-            # `prog` can be a program defined by the user
-            prog = fluid.default_main_program()
-            fluid.io.save_persistables(executor=exe, dirname=param_path,
-                                       main_program=prog)
+            exe.run(fluid.default_startup_program())
+            fluid.io.save_persistables(executor=exe, dirname=dir_path, filename=file_name)
+            # The persistable variables (weights and biases) in the fc layer of the
+            # network are going to be saved in the same file named "persistables"
+            # in the path "./my_paddle_model"
     """
     if main_program and main_program._is_distributed:
         _save_distributed_persistables(
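Both save_params and save_persistables have matching load APIs; their signatures appear in the API.spec hunk above. A minimal sketch of restoring the checkpoint written by the save_persistables example; it assumes the same network has been rebuilt first so that variable names match:

.. code-block:: python

    import paddle.fluid as fluid

    # Rebuild the network into the default main program before loading,
    # then restore all persistable variables from the single file.
    exe = fluid.Executor(fluid.CPUPlace())
    fluid.io.load_persistables(executor=exe, dirname="./my_paddle_model",
                               filename="persistables",
                               main_program=fluid.default_main_program())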
@@ -973,42 +1005,54 @@ def save_inference_model(dirname,
                          program_only=False):
     """
     Prune the given `main_program` to build a new program especially for inference,
-    and then save it and all related parameters to given `dirname` by the `executor`.
+    and then save it and all related parameters to the given `dirname`.
     If you just want to save parameters of your trained model, please use the
-    `save_params` API. You can refer to :ref:`api_guide_model_save_reader_en` for
-    more details.
+    :ref:`api_fluid_io_save_params` API. You can refer to :ref:`api_guide_model_save_reader_en`
+    for more details.
 
+    Note:
+        The :code:`dirname` is used to specify the folder where the inference model
+        structure and parameters are going to be saved. If you would like to save params of
+        Program in separate files, set `params_filename` to None; if you would like to save all
+        params of Program in a single file, use `params_filename` to specify the file name.
 
     Args:
         dirname(str): The directory path to save the inference model.
-        feeded_var_names(list[str]): Names of variables that need to be feeded data
-                                     during inference.
-        target_vars(list[Variable]): Variables from which we can get inference
-                                     results.
-        executor(Executor): The executor that saves the inference model.
-        main_program(Program|None): The original program, which will be pruned to
-                                    build the inference model. If is setted None,
-                                    the default main program will be used.
-                                    Default: None.
-        model_filename(str|None): The name of file to save the inference program
-                                  itself. If is setted None, a default filename
-                                  `__model__` will be used.
-        params_filename(str|None): The name of file to save all related parameters.
-                                   If it is setted None, parameters will be saved
-                                   in separate files .
+        feeded_var_names(list[str]): list of string. Names of variables that need to be fed
+                                     data during inference.
+        target_vars(list[Variable]): list of Variable. Variables from which we can get
+                                     inference results.
+        executor(Executor): The executor that saves the inference model. You can refer
+                            to :ref:`api_guide_executor_en` for more details.
+        main_program(Program, optional): The original program, which will be pruned to
+                                         build the inference model. If it is set to None,
+                                         the global default :code:`_main_program_` will be used.
+                                         Default: None.
+        model_filename(str, optional): The name of the file to save the inference program
+                                       itself. If it is set to None, a default filename
+                                       :code:`__model__` will be used.
+        params_filename(str, optional): The name of the file to save all related parameters.
+                                        If it is set to None, parameters will be saved
+                                        in separate files.
         export_for_deployment(bool): If True, programs are modified to only support
                                      direct inference deployment. Otherwise,
                                      more information will be stored for flexible
                                      optimization and re-training. Currently, only
                                      True is supported.
-        program_only(bool): If True, It will save inference program only, and do not save params of Program.
+                                     Default: True.
+        program_only(bool, optional): If True, it will save the inference program only,
+                                      and will not save the params of the Program.
+                                      Default: False.
 
     Returns:
-        target_var_name_list(list): The fetch variables' name list
+        The fetch variables' name list
+
+    Return Type:
+        list
 
     Raises:
-        ValueError: If `feed_var_names` is not a list of basestring.
-        ValueError: If `target_vars` is not a list of Variable.
+        ValueError: If `feeded_var_names` is not a list of basestring, an exception is raised.
+        ValueError: If `target_vars` is not a list of Variable, an exception is raised.
 
     Examples:
         .. code-block:: python
@@ -1018,8 +1062,8 @@ def save_inference_model(dirname,
             path = "./infer_model"
 
             # User defined network, here a softmax regression example
-            image = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32')
-            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+            image = fluid.data(name='img', shape=[None, 28, 28], dtype='float32')
+            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
             feeder = fluid.DataFeeder(feed_list=[image, label], place=fluid.CPUPlace())
             predict = fluid.layers.fc(input=image, size=10, act='softmax')
@@ -1037,9 +1081,9 @@ def save_inference_model(dirname,
                                           target_vars=[predict],
                                           executor=exe)
 
-            # In this example, the function will prune the default main program
-            # to make it suitable for infering the `predict` var. The pruned
-            # inference program is going to be saved in the "./infer_model/__model__"
+            # In this example, save_inference_model will prune the default
+            # main program according to the network's input node (img) and output node (predict).
+            # The pruned inference program is going to be saved in the "./infer_model/__model__"
             # and parameters are going to be saved in separate files under folder
             # "./infer_model".
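To complete the round trip, a minimal sketch of reloading the model saved above with fluid.io.load_inference_model (signature as listed in the API.spec hunk above); the random input stands in for real data:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    path = "./infer_model"
    # Returns the pruned program together with the recorded feed names
    # and fetch targets.
    [inference_program, feed_names, fetch_targets] = (
        fluid.io.load_inference_model(dirname=path, executor=exe))

    img = np.random.uniform(size=(1, 28, 28)).astype('float32')
    results = exe.run(inference_program,
                      feed={feed_names[0]: img},
                      fetch_list=fetch_targets)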
diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py
index bf94555846c..c006a46d045 100644
--- a/python/paddle/fluid/layers/io.py
+++ b/python/paddle/fluid/layers/io.py
@@ -47,25 +47,25 @@ def data(name,
     """
     **Data Layer**
 
-    This function takes in the input and based on whether data has
-    to be returned back as a minibatch, it creates the global variable by using
-    the helper functions. The global variables can be accessed by all the
-    following operators in the graph.
+    This operator creates the global variable. The global variable can be
+    accessed by all the following operators in the graph.
 
-    All the input variables of this function are passed in as local variables
-    to the LayerHelper constructor.
+    Note:
+        :code:`paddle.fluid.layers.data` is deprecated as it will be removed in
+        a later version. Please use :code:`paddle.fluid.data` .
 
-    Notice that paddle would only use :code:`shape` to infer the shapes of
-    following variables in the network during compile-time. During run-time,
-    paddle would not check whether the shape of the feeded data matches the
-    :code:`shape` settings in this function.
+    :code:`paddle.fluid.layers.data` sets the shape and dtype at compile time
+    but does NOT check the shape or the dtype of the fed data, while
+    :code:`paddle.fluid.data` checks the shape and the dtype of the data fed
+    by Executor or ParallelExecutor during run time.
 
     Args:
-        name(str): The name/alias of the function
+        name(str): The name/alias of the variable, see :ref:`api_guide_Name`
+            for more details.
         shape(list): Tuple declaring the shape. If :code:`append_batch_size` is
-                True and there is no -1 inside :code:`shape`, it should be
-                considered as the shape of the each sample. Otherwise, it
-                should be considered as the shape of the batched data.
+            True and there is no -1 inside :code:`shape`, it should be
+            considered as the shape of each sample. Otherwise, it should
+            be considered as the shape of the batched data.
         append_batch_size(bool):
            1. If true, it prepends -1 to the shape.
              For example if shape=[1], the resulting shape is [-1, 1]. This will
             be useful to set different batch size at run time.
           2. If shape contains -1, such as shape=[1, -1],
             append_batch_size will be enforced to be False (ineffective)
             because PaddlePaddle cannot set more than 1 unknown number on the shape.
-        dtype(np.dtype|VarType|str): The type of data : float32, float16, int etc
-        type(VarType): The output type. By default it is LOD_TENSOR.
+        dtype(np.dtype|VarType|str): The type of the data. Supported dtype: bool,
+            float16, float32, float64, int8, int16, int32, int64, uint8.
+        type(VarType): The output type. Supported type: VarType.LOD_TENSOR,
+            VarType.SELECTED_ROWS, VarType.NCCL_ID. Default: VarType.LOD_TENSOR.
         lod_level(int): The LoD Level. 0 means the input data is not a sequence.
+            Default: 0.
         stop_gradient(bool): A boolean that mentions whether gradient should flow.
+            Default: True.
 
     Returns:
-        Variable: The global variable that gives access to the data.
+        The global variable that gives access to the data.
+
+    Return Type:
+        Variable
 
     Examples:
         .. code-block:: python
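Given the deprecation Note above, a small sketch of the migration from paddle.fluid.layers.data to paddle.fluid.data; the names and shapes are illustrative:

.. code-block:: python

    import paddle.fluid as fluid

    # Deprecated style: append_batch_size=True silently prepends -1,
    # so the declared shape [784] becomes [-1, 784].
    x_old = fluid.layers.data(name='x_old', shape=[784], dtype='float32')

    # Recommended style: the batch dimension is written explicitly as None,
    # and the shape/dtype of the fed data are checked at run time.
    x_new = fluid.data(name='x_new', shape=[None, 784], dtype='float32')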
@@ -531,29 +538,43 @@ def py_reader(capacity,
     """
     Create a Python reader for data feeding in Python
 
-    This layer returns a Reader Variable.
+    This operator returns a Reader Variable.
     The Reader provides :code:`decorate_paddle_reader()` and
     :code:`decorate_tensor_provider()` to set a Python generator as the data
-    source. More details :ref:`user_guide_use_py_reader_en` . When
-    :code:`Executor::Run()` is invoked in C++ side, the data from the generator
-    would be read automatically. Unlike :code:`DataFeeder.feed()`, the data
-    reading process and :code:`Executor::Run()` process can run in parallel
-    using :code:`py_reader`. The :code:`start()` method of the Reader should be
-    called when each pass begins, while the :code:`reset()` method should be
-    called when the pass ends and :code:`fluid.core.EOFException` raises.
-    Note that :code:`Program.clone()` method cannot clone :code:`py_reader`.
+    source and feed the data from the data source to the Reader Variable.
+    When :code:`Executor::Run()` is invoked in C++ side, the data from the
+    generator would be read automatically. Unlike :code:`DataFeeder.feed()`,
+    the data reading process and :code:`Executor::Run()` process can run in
+    parallel using :code:`py_reader`. The :code:`start()` method of the Reader
+    should be called when each pass begins, while the :code:`reset()` method
+    should be called when the pass ends and :code:`fluid.core.EOFException` is raised.
+
+    Note:
+        :code:`Program.clone()` method cannot clone :code:`py_reader`. You can
+        refer to :ref:`api_fluid_Program` for more details.
+
+        The :code:`read_file` call needs to be in the program block of :code:`py_reader`.
+        You can refer to :ref:`api_fluid_layers_read_file` for more details.
 
     Args:
        capacity(int): The buffer capacity maintained by :code:`py_reader`.
-       shapes(list|tuple): List of tuples which declaring data shapes.
-       dtypes(list|tuple): List of strs which declaring data type.
+       shapes(list|tuple): List of tuples declaring the data shapes; shapes[i]
+           represents the i-th data shape.
+       dtypes(list|tuple): List of strings declaring the data types. Supported dtype:
+           bool, float16, float32, float64, int8, int16, int32, int64, uint8.
       lod_levels(list|tuple): List of ints which declaring data lod_level.
-       name(basestring): The prefix Python queue name and Reader name. None will
-           be generated automatically.
-       use_double_buffer(bool): Whether use double buffer or not.
+       name(basestring): The default value is None. Normally there is no
+           need for the user to set this property. For more information, please
+           refer to :ref:`api_guide_Name`.
+       use_double_buffer(bool): Whether to use double buffer or not. The double buffer
+           is for pre-reading the data of the next batch and copying it asynchronously
+           from CPU to GPU. Default is True.
 
     Returns:
-       Variable: A Reader from which we can get feeding data.
+       A Reader from which we can get feeding data.
+
+    Return Type:
+       Variable
 
     Examples:
        1. The basic usage of :code:`py_reader` is as follows:
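The basic-usage example referenced above lies outside the hunk shown; a condensed sketch of the workflow this docstring describes, with an illustrative random generator standing in for real data:

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    def sample_generator():
        for _ in range(100):
            yield np.random.uniform(-1, 1, size=(784,)).astype('float32'),

    reader = fluid.layers.py_reader(capacity=64,
                                    shapes=[(-1, 784)],
                                    dtypes=['float32'])
    # Feed the Reader from a batched Python reader ...
    reader.decorate_paddle_reader(paddle.batch(sample_generator, batch_size=32))
    # ... and unpack it inside the program block, as the Note requires.
    img = fluid.layers.read_file(reader)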
diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py
index 6d2bbe3c1e2..0f61d201967 100644
--- a/python/paddle/fluid/reader.py
+++ b/python/paddle/fluid/reader.py
@@ -595,7 +595,10 @@ class PyReader(DataLoaderBase):
            use return_list=True in dygraph mode.
 
        Returns:
-           reader (Reader): the created reader object.
+           the created reader object.
+
+       Return type:
+           Reader
 
        Examples:
            1. If iterable = False, the created PyReader object is almost the
@@ -615,6 +618,11 @@ class PyReader(DataLoaderBase):
               EPOCH_NUM = 3
               ITER_NUM = 5
               BATCH_SIZE = 3
+
+              def network(image, label):
+                  # User-defined network, here is an example of softmax regression.
+                  predict = fluid.layers.fc(input=image, size=10, act='softmax')
+                  return fluid.layers.cross_entropy(input=predict, label=label)
 
               def reader_creator_random_image_and_label(height, width):
                   def reader():
@@ -626,8 +634,8 @@ class PyReader(DataLoaderBase):
                       yield fake_image, fake_label
                   return reader
 
-              image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
-              label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+              image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+              label = fluid.data(name='label', shape=[None, 1], dtype='int64')
 
               reader = fluid.io.PyReader(feed_list=[image, label],
                                          capacity=4,
@@ -636,8 +644,8 @@ class PyReader(DataLoaderBase):
               user_defined_reader = reader_creator_random_image_and_label(784, 784)
               reader.decorate_sample_list_generator(
                   paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
-              # definition of network is omitted
-              executor = fluid.Executor(fluid.CUDAPlace(0))
+              loss = network(image, label)
+              executor = fluid.Executor(fluid.CPUPlace())
               executor.run(fluid.default_startup_program())
               for i in range(EPOCH_NUM):
                   reader.start()
@@ -665,26 +673,35 @@ class PyReader(DataLoaderBase):
               ITER_NUM = 5
               BATCH_SIZE = 10
 
+              def network(image, label):
+                  # User-defined network, here is an example of softmax regression.
+                  predict = fluid.layers.fc(input=image, size=10, act='softmax')
+                  return fluid.layers.cross_entropy(input=predict, label=label)
+
               def reader_creator_random_image(height, width):
                   def reader():
                       for i in range(ITER_NUM):
-                          yield np.random.uniform(low=0, high=255, size=[height, width]),
+                          fake_image = np.random.uniform(low=0, high=255, size=[height, width])
+                          fake_label = np.ones([1])
+                          yield fake_image, fake_label
                   return reader
 
-              image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
-              reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=True, return_list=False)
+              image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+              label = fluid.data(name='label', shape=[None, 1], dtype='int64')
+              reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
 
               user_defined_reader = reader_creator_random_image(784, 784)
               reader.decorate_sample_list_generator(
                   paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
-                  fluid.core.CUDAPlace(0))
-              # definition of network is omitted
-              executor = fluid.Executor(fluid.CUDAPlace(0))
-              executor.run(fluid.default_main_program())
-
+                  fluid.core.CPUPlace())
+
+              loss = network(image, label)
+              executor = fluid.Executor(fluid.CPUPlace())
+              executor.run(fluid.default_startup_program())
+
               for _ in range(EPOCH_NUM):
                   for data in reader():
-                      executor.run(feed=data)
+                      executor.run(feed=data, fetch_list=[loss])
 
           3. If return_list=True, the return values would be presented as list instead of dict.
@@ -745,8 +762,8 @@ class PyReader(DataLoaderBase):
        Start the data feeding thread.
        Can only call when the reader object is not iterable.
 
-        Example:
-            .. code-block:: python
+        Example:
+            .. code-block:: python
 
               import paddle
               import paddle.fluid as fluid
               import numpy as np
 
               BATCH_SIZE = 10
 
               def generator():
                   for i in range(5):
                       yield np.random.uniform(low=0, high=255, size=[784, 784]),
 
-              image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+              image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
               reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
               reader.decorate_sample_list_generator(
                   paddle.batch(generator, batch_size=BATCH_SIZE))
 
-              executor = fluid.Executor(fluid.CUDAPlace(0))
+              executor = fluid.Executor(fluid.CPUPlace())
               executor.run(fluid.default_startup_program())
               for i in range(3):
                   reader.start()
@@ -795,12 +812,12 @@ class PyReader(DataLoaderBase):
                   for i in range(5):
                       yield np.random.uniform(low=0, high=255, size=[784, 784]),
 
-              image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
+              image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
               reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
               reader.decorate_sample_list_generator(
                   paddle.batch(generator, batch_size=BATCH_SIZE))
 
-              executor = fluid.Executor(fluid.CUDAPlace(0))
+              executor = fluid.Executor(fluid.CPUPlace())
               executor.run(fluid.default_startup_program())
               for i in range(3):
                   reader.start()
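The start() and reset() hunks above show only the changed lines of their examples; for context, a sketch of the full consumption loop for a non-iterable PyReader, following the pattern the two docstrings describe (shapes and data are illustrative):

.. code-block:: python

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    def generator():
        for _ in range(16):
            yield np.random.uniform(size=[784]).astype('float32'),

    image = fluid.data(name='image', shape=[None, 784], dtype='float32')
    reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
    reader.decorate_sample_list_generator(paddle.batch(generator, batch_size=4))

    executor = fluid.Executor(fluid.CPUPlace())
    executor.run(fluid.default_startup_program())

    for _ in range(3):                    # one start()/reset() pair per pass
        reader.start()
        try:
            while True:
                executor.run(feed=None)   # data comes from the reader thread
        except fluid.core.EOFException:
            reader.reset()                # reset after the pass raises EOFException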
@@ -848,6 +865,11 @@ class PyReader(DataLoaderBase):
                EPOCH_NUM = 3
                ITER_NUM = 15
                BATCH_SIZE = 3
+
+               def network(image, label):
+                   # User-defined network, here is an example of softmax regression.
+                   predict = fluid.layers.fc(input=image, size=10, act='softmax')
+                   return fluid.layers.cross_entropy(input=predict, label=label)
 
                def random_image_and_label_generator(height, width):
                    def generator():
@@ -859,21 +881,21 @@ class PyReader(DataLoaderBase):
                        yield fake_image, fake_label
                    return generator
 
-               image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
-               label = fluid.layers.data(name='label', shape=[1], dtype='int32')
+               image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+               label = fluid.data(name='label', shape=[None, 1], dtype='int64')
                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
 
                user_defined_generator = random_image_and_label_generator(784, 784)
                reader.decorate_sample_generator(user_defined_generator,
                                                 batch_size=BATCH_SIZE,
-                                                places=[fluid.CUDAPlace(0)])
-               # definition of network is omitted
-               executor = fluid.Executor(fluid.CUDAPlace(0))
-               executor.run(fluid.default_main_program())
+                                                places=[fluid.CPUPlace()])
+               loss = network(image, label)
+               executor = fluid.Executor(fluid.CPUPlace())
+               executor.run(fluid.default_startup_program())
 
                for _ in range(EPOCH_NUM):
                    for data in reader():
-                       executor.run(feed=data)
+                       executor.run(feed=data, fetch_list=[loss])
        '''
        self._loader.set_sample_generator(sample_generator, batch_size,
@@ -905,6 +927,11 @@ class PyReader(DataLoaderBase):
                ITER_NUM = 15
                BATCH_SIZE = 3
 
+               def network(image, label):
+                   # User-defined network, here is an example of softmax regression.
+                   predict = fluid.layers.fc(input=image, size=10, act='softmax')
+                   return fluid.layers.cross_entropy(input=predict, label=label)
+
                def random_image_and_label_generator(height, width):
                    def generator():
                        for i in range(ITER_NUM):
@@ -915,21 +942,22 @@ class PyReader(DataLoaderBase):
                        yield fake_image, fake_label
                    return generator
 
-               image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
-               label = fluid.layers.data(name='label', shape=[1], dtype='int32')
+               image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+               label = fluid.data(name='label', shape=[None, 1], dtype='int64')
                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
 
                user_defined_generator = random_image_and_label_generator(784, 784)
                reader.decorate_sample_list_generator(
                    paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
-                   fluid.core.CUDAPlace(0))
-               # definition of network is omitted
-               executor = fluid.Executor(fluid.core.CUDAPlace(0))
-               executor.run(fluid.default_main_program())
+                   fluid.core.CPUPlace())
+
+               loss = network(image, label)
+               executor = fluid.Executor(fluid.core.CPUPlace())
+               executor.run(fluid.default_startup_program())
 
                for _ in range(EPOCH_NUM):
                    for data in reader():
-                       executor.run(feed=data)
+                       executor.run(feed=data, fetch_list=[loss])
        '''
        self._loader.set_sample_list_generator(reader, places)
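The three decorate_* methods in this file differ only in the granularity of what the user's generator yields; a schematic comparison (dtypes and shapes are illustrative):

.. code-block:: python

    import numpy as np

    # decorate_sample_generator: yield ONE sample (a tuple of arrays) at a
    # time; PyReader batches them itself via its batch_size argument.
    def sample_gen():
        for _ in range(8):
            yield np.zeros([784], dtype='float32'), np.ones([1], dtype='int64')

    # decorate_sample_list_generator: yield a LIST of samples per step,
    # typically produced by wrapping sample_gen with paddle.batch(...).
    def sample_list_gen():
        for _ in range(2):
            yield [(np.zeros([784], dtype='float32'),
                    np.ones([1], dtype='int64'))] * 4

    # decorate_batch_generator: yield already-batched arrays whose first
    # dimension is the batch size.
    def batch_gen():
        for _ in range(2):
            yield (np.zeros([4, 784], dtype='float32'),
                   np.ones([4, 1], dtype='int64'))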
@@ -958,6 +986,11 @@ class PyReader(DataLoaderBase):
                EPOCH_NUM = 3
                ITER_NUM = 15
                BATCH_SIZE = 3
+
+               def network(image, label):
+                   # User-defined network, here is an example of softmax regression.
+                   predict = fluid.layers.fc(input=image, size=10, act='softmax')
+                   return fluid.layers.cross_entropy(input=predict, label=label)
 
                def random_image_and_label_generator(height, width):
                    def generator():
                        for i in range(ITER_NUM):
@@ -966,22 +999,25 @@ class PyReader(DataLoaderBase):
                                                            high=255,
                                                            size=[BATCH_SIZE, height, width])
                        batch_label = np.ones([BATCH_SIZE, 1])
+                       batch_image = batch_image.astype('float32')
+                       batch_label = batch_label.astype('int64')
                        yield batch_image, batch_label
                    return generator
 
-               image = fluid.layers.data(name='image', shape=[784, 784], dtype='float32')
-               label = fluid.layers.data(name='label', shape=[1], dtype='int32')
+               image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
+               label = fluid.data(name='label', shape=[None, 1], dtype='int64')
                reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
 
                user_defined_generator = random_image_and_label_generator(784, 784)
-               reader.decorate_batch_generator(user_defined_generator, fluid.CUDAPlace(0))
-               # definition of network is omitted
-               executor = fluid.Executor(fluid.CUDAPlace(0))
-               executor.run(fluid.default_main_program())
+               reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
+
+               loss = network(image, label)
+               executor = fluid.Executor(fluid.CPUPlace())
+               executor.run(fluid.default_startup_program())
 
                for _ in range(EPOCH_NUM):
                    for data in reader():
-                       executor.run(feed=data)
+                       executor.run(feed=data, fetch_list=[loss])
        '''
        self._loader.set_batch_generator(reader, places)
-- 
GitLab