diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index fc26715d7cc4e82723ee2ab6b02c68dfc7dc3279..b82434d0588accd967c1929d4380427ce90b17ee 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -152,6 +152,12 @@ def monkey_patch_math_tensor():
     def _ndim_(var):
         return len(var.shape)

+    def ndimension(var):
+        return len(var.shape)
+
+    def dim(var):
+        return len(var.shape)
+
     @property
     def _size_(var):
         return int(np.prod(var.shape))
@@ -174,8 +180,8 @@ def monkey_patch_math_tensor():
         ('__len__', _len_),
         ('__index__', _index_),
         ('astype', astype),
-        ('dim', lambda x: len(x.shape)),
-        ('ndimension', lambda x: len(x.shape)),
+        ('dim', dim),
+        ('ndimension', ndimension),
         ('ndim', _ndim_),
         ('size', _size_),
         ('T', _T_),
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index a9ba6f91a1e259f89ed12497f313eb5aa6936259..823f790f41030e6156dbd106abfa7471f846e3e0 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1260,6 +1260,7 @@ class Variable(metaclass=VariableMetaClass):
         In Static Graph Mode:

             .. code-block:: python
+                :name: code-example-1

                 import paddle.fluid as fluid
                 cur_program = fluid.Program()
@@ -1271,6 +1272,7 @@ class Variable(metaclass=VariableMetaClass):
         In Dygraph Mode:

             .. code-block:: python
+                :name: code-example-2

                 import paddle.fluid as fluid
                 import numpy as np
@@ -5743,21 +5745,22 @@ class Program:
         use :code:`clone` after :code:`Opimizer.minimize`, but we still
         recommend you to use :code:`clone` before using :code:`Opimizer.minimize`.

-        For Example:
-          ::
+        Examples:
+            .. code-block:: python
+                :name: code-example-1

-            import paddle
-            import paddle.static as static
+                import paddle
+                import paddle.static as static

-            paddle.enable_static()
+                paddle.enable_static()

-            img = static.data(name='image', shape=[None, 784])
-            pred = static.nn.fc(x=img, size=10, actvation='relu')
-            loss = paddle.mean(pred)
-            # Here we use clone before Momentum
-            test_program = static.default_main_program().clone(for_test=True)
-            optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
-            optimizer.minimize(loss)
+                img = static.data(name='image', shape=[None, 784])
+                pred = static.nn.fc(x=img, size=10, activation='relu')
+                loss = paddle.mean(pred)
+                # Here we use clone before Momentum
+                test_program = static.default_main_program().clone(for_test=True)
+                optimizer = paddle.optimizer.Momentum(learning_rate=0.01, momentum=0.9)
+                optimizer.minimize(loss)


         Args:
@@ -5778,6 +5781,7 @@ class Program:
        after :code:`clone`:

         .. code-block:: python
+            :name: code-example-2

             import paddle

@@ -5795,6 +5799,7 @@ class Program:
         1. To clone a test program, the sample code is:
                 .. code-block:: python
+                    :name: code-example-3

                     import paddle
                     import paddle.static as static

@@ -5847,6 +5852,7 @@ class Program:
         2. The clone method can be avoid if you create program for training and program for testing individually.
                 .. code-block:: python
+                    :name: code-example-4

                     import paddle
                     import paddle.static as static

@@ -7235,30 +7241,32 @@ def program_guard(main_program, startup_program=None):
             Default: None.

     Examples:
-       .. code-block:: python
+        .. code-block:: python
+            :name: code-example-1

-          import paddle
+            import paddle

-          paddle.enable_static()
-          main_program = paddle.static.Program()
-          startup_program = paddle.static.Program()
-          with paddle.static.program_guard(main_program, startup_program):
-              data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
-              hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')
+            paddle.enable_static()
+            main_program = paddle.static.Program()
+            startup_program = paddle.static.Program()
+            with paddle.static.program_guard(main_program, startup_program):
+                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+                hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')

     Notes: The temporary :code:`Program` can be used if the user does not need
     to construct either of startup program or main program.

     Examples:
-       .. code-block:: python
+        .. code-block:: python
+            :name: code-example-2

-          import paddle
+            import paddle

-          paddle.enable_static()
-          main_program = paddle.static.Program()
-          # does not care about startup program. Just pass a temporary value.
-          with paddle.static.program_guard(main_program, paddle.static.Program()):
-              data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
+            paddle.enable_static()
+            main_program = paddle.static.Program()
+            # does not care about startup program. Just pass a temporary value.
+            with paddle.static.program_guard(main_program, paddle.static.Program()):
+                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')

     """
     from .data_feeder import check_type
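Side note on the two patches above: replacing the anonymous ``lambda x: len(x.shape)`` entries with the named functions ``dim`` and ``ndimension`` presumably gives the patched methods a meaningful ``__name__`` (and, in the static-graph twin below, a place to hang docstrings). A minimal smoke test of the resulting dygraph API, assuming a working ``paddle`` install; the tensor shape here is illustrative:

.. code-block:: python

    import paddle

    # dygraph (imperative) mode is Paddle's default
    t = paddle.zeros((3, 2, 1))
    # both patched methods simply return len(t.shape)
    assert t.dim() == 3
    assert t.ndimension() == 3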
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 41299a624c12b27a1b36e1e7330d5a554bb784f4..4191b6a8142b7f2f41454cac8c244f743ce17c29 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -323,6 +323,48 @@ def monkey_patch_variable():
         """
         return len(self.shape)

+    def ndimension(self):
+        """
+        Returns the dimension of the current Variable
+
+        Returns:
+            int: the dimension
+
+        Examples:
+            .. code-block:: python
+
+                import paddle
+
+                paddle.enable_static()
+
+                # create a static Variable
+                x = paddle.static.data(name='x', shape=[3, 2, 1])
+                # print the dimension of the Variable
+                print(x.ndimension())  # 3
+        """
+        return len(self.shape)
+
+    def dim(self):
+        """
+        Returns the dimension of the current Variable
+
+        Returns:
+            int: the dimension
+
+        Examples:
+            .. code-block:: python
+
+                import paddle
+
+                paddle.enable_static()
+
+                # create a static Variable
+                x = paddle.static.data(name='x', shape=[3, 2, 1])
+                # print the dimension of the Variable
+                print(x.dim())  # 3
+        """
+        return len(self.shape)
+
     def _scalar_add_(var, value):
         return _scalar_op_(var, 1.0, value)

@@ -509,8 +551,8 @@ def monkey_patch_variable():
         ('append', append),
         ('item', _item),
         ('pop', pop),
-        ('dim', lambda x: len(x.shape)),
-        ('ndimension', lambda x: len(x.shape)),
+        ('dim', dim),
+        ('ndimension', ndimension),
         ('ndim', _ndim_),
         (
             '__add__',
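For context, the ``('name', callable)`` pairs in the list above are bound onto the static ``Variable`` class by the surrounding ``monkey_patch_variable()``, presumably via ``setattr``. A standalone illustration of that pattern, where ``Box`` and the binding loop are stand-ins and not Paddle internals:

.. code-block:: python

    # Stand-in class; Paddle patches its real Variable/Tensor types instead.
    class Box:
        def __init__(self, shape):
            self.shape = shape

    def dim(self):
        return len(self.shape)

    def ndimension(self):
        return len(self.shape)

    # bind the named functions as instance methods, mirroring the patch above
    for name, method in [('dim', dim), ('ndimension', ndimension)]:
        setattr(Box, name, method)

    b = Box([3, 2, 1])
    assert b.dim() == b.ndimension() == 3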
diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py
index 007b417a98b116e9e992b26f4e9d8e0a7366210d..c48128d2c083f92ae668f5d49e7afad2896c1bb7 100644
--- a/python/paddle/static/nn/control_flow.py
+++ b/python/paddle/static/nn/control_flow.py
@@ -896,16 +896,18 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
        3. If it is in static graph mode, any tensors or operations created outside
           or inside of ``true_fn`` and ``false_fn`` will be in net building
           regardless of which branch is selected at runtime. This has frequently
-          surprised users who expected a lazy semantics. For example:
+          surprised users who expected lazy semantics.

-        .. code-block:: python
+        Examples:
+            .. code-block:: python
+                :name: code-example-1

-            import paddle
+                import paddle

-            a = paddle.zeros((1, 1))
-            b = paddle.zeros((1, 1))
-            c = a * b
-            out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
+                a = paddle.zeros((1, 1))
+                b = paddle.zeros((1, 1))
+                c = a * b
+                out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

         No matter whether ``a < b`` , ``c = a * b`` will be in net building and
         run. ``a + c`` and ``b * b`` will be in net building, but only one
@@ -933,6 +935,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):

     Examples:
         .. code-block:: python
+            :name: code-example-2

             import paddle

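To make the ``cond`` docstring's point concrete, here is a hedged, runnable sketch (assuming a working ``paddle`` install): both branches are built into the static program, but only the branch selected by the predicate executes.

.. code-block:: python

    import paddle

    paddle.enable_static()

    a = paddle.zeros((1, 1))
    b = paddle.zeros((1, 1))
    c = a * b  # built into the program regardless of which branch runs
    out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

    exe = paddle.static.Executor()
    exe.run(paddle.static.default_startup_program())
    (res,) = exe.run(fetch_list=[out])
    print(res)  # [[0.]] -- a < b is False here, so the b * b branch ran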