diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 5c92a29bb33436c6e8d2be5c2365f04e0a39926b..66e97caa7059144d47a7825aabafc8b231a2145a 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -47,14 +47,14 @@ paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.Paralle
 paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.GradientScaleStrategy, arg0: int) -> None
 paddle.fluid.BuildStrategy.ReduceStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy.ReduceStrategy, arg0: int) -> None
 paddle.fluid.BuildStrategy.__init__ __init__(self: paddle.fluid.core.ParallelExecutor.BuildStrategy) -> None
-paddle.fluid.io.save_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', 'b55d6193a1d4198d45b013fc5779e1f2'))
+paddle.fluid.io.save_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '869104f47e6fd21d897c3fcc426aa942'))
 paddle.fluid.io.save_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '3a7a99abac3e1bf898871fe609354218'))
 paddle.fluid.io.save_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '9141bb5f32caf7975eb3fd88c8a1b2da'))
-paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '0a5308f496632ab1ec3ba1f1377e6f95'))
+paddle.fluid.io.load_vars (ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)), ('document', '1bb9454cf09d71f190bb51550c5a3ac9'))
 paddle.fluid.io.load_params (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '41779819cef32f2246e83aebc5a002e2'))
 paddle.fluid.io.load_persistables (ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)), ('document', '28df5bfe26ca7a077f91156abb0fe6d2'))
 paddle.fluid.io.save_inference_model (ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)), ('document', 'af82e1b5fe5764029905a191b987f63d'))
-paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '648f64d8fd81572eef34a69e533459ef'))
+paddle.fluid.io.load_inference_model (ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename', 'pserver_endpoints'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'aaf3392332f2e5ef9d9177985be2c04a'))
 paddle.fluid.io.PyReader.__init__ (ArgSpec(args=['self', 'feed_list', 'capacity', 'use_double_buffer', 'iterable'], varargs=None, keywords=None, defaults=(True, False)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
 paddle.fluid.io.PyReader.decorate_batch_generator (ArgSpec(args=['self', 'reader', 'places'], varargs=None, keywords=None, defaults=(None,)), ('document', '4a072de39998ee4e0de33fcec11325a6'))
 paddle.fluid.io.PyReader.decorate_sample_generator (ArgSpec(args=['self', 'sample_generator', 'batch_size', 'drop_last', 'places'], varargs=None, keywords=None, defaults=(True, None)), ('document', '3db4b24d33fe4f711e303f9673dc5c6a'))
diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py
index b573093c3025acead94cc0019f69ec8ca8e1527f..b52809f15f74d027035e722f49341e6222aa894e 100644
--- a/python/paddle/fluid/io.py
+++ b/python/paddle/fluid/io.py
@@ -144,27 +144,37 @@ def save_vars(executor,
     Examples:
         .. code-block:: python

-            exe = fluid.Executor(fluid.CPUPlace())
-            param_path = "./my_paddle_model"
+            import paddle.fluid as fluid
+            main_prog = fluid.Program()
+            startup_prog = fluid.Program()
+            with fluid.program_guard(main_prog, startup_prog):
+                data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+                w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+                b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+                hidden_w = fluid.layers.matmul(x=data, y=w)
+                hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            exe.run(startup_prog)

+            param_path = "./my_paddle_model"
             # The first usage: using `main_program` to specify variables
             def name_has_fc(var):
                 res = "fc" in var.name
                 return res
-
-            prog = fluid.default_main_program()
-            fluid.io.save_vars(executor=exe, dirname=path, main_program=prog,
+            fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog,
                                vars=None, predicate = name_has_fc)
             # All variables in `main_program` whose name includes "fc" will be saved.
             # And variables are going to be saved separately.


             # The second usage: using `vars` to specify variables
-            var_list = [var_a, var_b, var_c]
+            var_list = [w, b]
+            path = "./my_paddle_vars"
             fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
                                filename="vars_file")
             # var_a, var_b and var_c will be saved. And they are going to be
-            # saved in the same file named 'var_file' in the path "./my_paddle_model".
+            # saved in the same file named 'vars_file' in the path "./my_paddle_vars".
     """
     save_dirname = os.path.normpath(dirname)
     if vars is None:
@@ -546,27 +556,40 @@ def load_vars(executor,
     Examples:
         .. code-block:: python

-            exe = fluid.Executor(fluid.CPUPlace())
-            param_path = "./my_paddle_model"
+            import paddle.fluid as fluid
+            main_prog = fluid.Program()
+            startup_prog = fluid.Program()
+            with fluid.program_guard(main_prog, startup_prog):
+                data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+                w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32', name='fc_w')
+                b = fluid.layers.create_parameter(shape=[200], dtype='float32', name='fc_b')
+                hidden_w = fluid.layers.matmul(x=data, y=w)
+                hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            exe.run(startup_prog)

+            param_path = "./my_paddle_model"
             # The first usage: using `main_program` to specify variables
             def name_has_fc(var):
                 res = "fc" in var.name
                 return res
-
-            prog = fluid.default_main_program()
-            fluid.io.load_vars(executor=exe, dirname=path, main_program=prog,
+            fluid.io.save_vars(executor=exe, dirname=param_path, main_program=main_prog,
+                               vars=None, predicate=name_has_fc)
+            fluid.io.load_vars(executor=exe, dirname=param_path, main_program=main_prog,
                                vars=None, predicate=name_has_fc)
             # All variables in `main_program` whose name includes "fc" will be loaded.
             # And all the variables are supposed to have been saved in differnet files.

-
             # The second usage: using `vars` to specify variables
-            var_list = [var_a, var_b, var_c]
+            path = "./my_paddle_vars"
+            var_list = [w, b]
+            fluid.io.save_vars(executor=exe, dirname=path, vars=var_list,
+                               filename="vars_file")
             fluid.io.load_vars(executor=exe, dirname=path, vars=var_list,
                                filename="vars_file")
-            # var_a, var_b and var_c will be loaded. And they are supposed to haven
-            # been saved in the same file named 'var_file' in the path "./my_paddle_model".
+            # w and b will be loaded. And they are supposed to have
+            # been saved in the same file named 'vars_file' in the path "./my_paddle_vars".
     """
     load_dirname = os.path.normpath(dirname)
     if vars is None:
@@ -1088,25 +1111,43 @@ def load_inference_model(dirname,
     Examples:
         .. code-block:: python

-            exe = fluid.Executor(fluid.CPUPlace())
+            import paddle.fluid as fluid
+            import numpy as np
+            main_prog = fluid.Program()
+            startup_prog = fluid.Program()
+            with fluid.program_guard(main_prog, startup_prog):
+                data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+                w = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+                b = fluid.layers.create_parameter(shape=[200], dtype='float32')
+                hidden_w = fluid.layers.matmul(x=data, y=w)
+                hidden_b = fluid.layers.elementwise_add(hidden_w, b)
+            place = fluid.CPUPlace()
+            exe = fluid.Executor(place)
+            exe.run(startup_prog)
             path = "./infer_model"
-            endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
-            [inference_program, feed_target_names, fetch_targets] =
+            fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
+                         target_vars=[hidden_b], executor=exe, main_program=main_prog)
+            tensor_img = np.array(np.random.random((1, 64, 784)), dtype=np.float32)
+            [inference_program, feed_target_names, fetch_targets] = \
                 fluid.io.load_inference_model(dirname=path, executor=exe)
             results = exe.run(inference_program,
                           feed={feed_target_names[0]: tensor_img},
                           fetch_list=fetch_targets)

+            # endpoints is your pserver endpoints list, the above is just an example
+            endpoints = ["127.0.0.1:2023","127.0.0.1:2024"]
             # if we need lookup table, we will use:
-            fluid.io.load_inference_model(dirname=path, executor=exe, pserver_endpoints=endpoints)
+            [dist_inference_program, dist_feed_target_names, dist_fetch_targets] = \
+                fluid.io.load_inference_model(dirname=path,
+                                              executor=exe,
+                                              pserver_endpoints=endpoints)

             # In this example, the inference program was saved in the
             # "./infer_model/__model__" and parameters were saved in
-            # separate files in ""./infer_model".
+            # separate files in "./infer_model".
             # After getting inference program, feed target names and
             # fetch targets, we can use an Executor to run the inference
             # program to get the inference result.
-
     """
     load_dirname = os.path.normpath(dirname)
     if not os.path.isdir(load_dirname):
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index d8aace9fdfa601413bb4d4b1b2a309ba6a8e4ece..00f8fc815b3fd54dc609f2cbe9e60a685ffbc7ee 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -124,11 +124,21 @@ class L2DecayRegularizer(WeightDecayRegularizer):
     Examples:
         .. code-block:: python

+            import paddle.fluid as fluid
+            main_prog = fluid.Program()
+            startup_prog = fluid.Program()
+            with fluid.program_guard(main_prog, startup_prog):
+                data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
+                label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+                hidden = fluid.layers.fc(input=data, size=128, act='relu')
+                prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+                loss = fluid.layers.cross_entropy(input=prediction, label=label)
+                avg_loss = fluid.layers.mean(loss)
             optimizer = fluid.optimizer.Adagrad(
                 learning_rate=1e-4,
                 regularization=fluid.regularizer.L2DecayRegularizer(
                     regularization_coeff=0.1))
-            optimizer.minimize(avg_cost)
+            optimizer.minimize(avg_loss)
     """

     def __init__(self, regularization_coeff=0.0):
@@ -183,11 +193,21 @@ class L1DecayRegularizer(WeightDecayRegularizer):
     Examples:
         .. code-block:: python

+            import paddle.fluid as fluid
+            main_prog = fluid.Program()
+            startup_prog = fluid.Program()
+            with fluid.program_guard(main_prog, startup_prog):
+                data = fluid.layers.data(name='image', shape=[3, 28, 28], dtype='float32')
+                label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+                hidden = fluid.layers.fc(input=data, size=128, act='relu')
+                prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+                loss = fluid.layers.cross_entropy(input=prediction, label=label)
+                avg_loss = fluid.layers.mean(loss)
             optimizer = fluid.optimizer.Adagrad(
                 learning_rate=1e-4,
                 regularization=fluid.regularizer.L1DecayRegularizer(
                     regularization_coeff=0.1))
-            optimizer.minimize(avg_cost)
+            optimizer.minimize(avg_loss)
     """

     def __init__(self, regularization_coeff=0.0):