Unverified commit fdefa1e5 authored by W Wilber, committed by GitHub

cherry-pick en doc test=release/1.6 test=document_fix (#20486)

modify en api doc

- leaky_relu
- less_than 
- log
- logical_and
- logical_or
- logical_xor
- logical_not
Parent be52f333
......@@ -218,7 +218,7 @@ paddle.fluid.layers.random_crop (ArgSpec(args=['x', 'shape', 'seed'], varargs=No
paddle.fluid.layers.mean_iou (ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None), ('document', 'dea29c0c3cdbd5b498afef60e58c9d7c'))
paddle.fluid.layers.relu (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '0942c174f4f6fb274976d4357356f6a2'))
paddle.fluid.layers.selu (ArgSpec(args=['x', 'scale', 'alpha', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '3ee40bc474b4bccdaf112d3f0d847318'))
paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '02f668664e3bfc4df6c00d7363467140'))
paddle.fluid.layers.log (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '7dc5fe72f1b3f0b6d903a2594de2521d'))
paddle.fluid.layers.crop (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', '32196a194f757b4da114a595a5bc6414'))
paddle.fluid.layers.crop_tensor (ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)), ('document', 'd460aaf35afbbeb9beea4789aa6e4343'))
paddle.fluid.layers.rank_loss (ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '6d49ba251e23f32cb09df54a851bb960'))
......@@ -231,7 +231,7 @@ paddle.fluid.layers.hard_sigmoid (ArgSpec(args=['x', 'slope', 'offset', 'name'],
paddle.fluid.layers.swish (ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None)), ('document', 'e0dc7bc66cba939033bc028d7a62c5f4'))
paddle.fluid.layers.prelu (ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', 'cb417a61f701c937f33d057fe85203ab'))
paddle.fluid.layers.brelu (ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None)), ('document', '49580538249a52c857fce75c94ad8af7'))
paddle.fluid.layers.leaky_relu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None)), ('document', '1eb3009c69060299ec87949ee0d4b9ae'))
paddle.fluid.layers.leaky_relu (ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None)), ('document', '11352d3780f62952ea3332658714758c'))
paddle.fluid.layers.soft_relu (ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None)), ('document', 'f14efa9e5fd2e8b3d976cdda38eff43f'))
paddle.fluid.layers.flatten (ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '424ff350578992f201f2c5c30959ef89'))
paddle.fluid.layers.sequence_mask (ArgSpec(args=['x', 'maxlen', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 'int64', None)), ('document', '6c3f916921b24edaad220f1fcbf039de'))
......@@ -263,10 +263,10 @@ paddle.fluid.layers.strided_slice (ArgSpec(args=['input', 'axes', 'starts', 'end
paddle.fluid.layers.shape (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', '39534cccdb8e727e287316c7c42e6663'))
paddle.fluid.layers.rank (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'a4492cf0393c6f70e4e25c681dcd73f4'))
paddle.fluid.layers.size (ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None), ('document', 'cf2e156beae36378722666c4c33bebfe'))
paddle.fluid.layers.logical_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '12db97c6c459c0f240ec7006737174f2'))
paddle.fluid.layers.logical_or (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '15adbc561618b7db69671e02009bea67'))
paddle.fluid.layers.logical_xor (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '77ccf37b710c507dd97e03f08ce8bb29'))
paddle.fluid.layers.logical_not (ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6e2fe8a322ec69811f6507d22acf8f9f'))
paddle.fluid.layers.logical_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '352f1a6ac79c63cf5e3ca10ee7645daf'))
paddle.fluid.layers.logical_or (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '2b00d1a6f1d994ec9eba17304b92bf20'))
paddle.fluid.layers.logical_xor (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '58f3e4fb6b1bd451cb0a9c2d5edd5f47'))
paddle.fluid.layers.logical_not (ArgSpec(args=['x', 'out', 'name'], varargs=None, keywords=None, defaults=(None, None)), ('document', '75fa78bea3ba82366dd99d2f92da56ef'))
paddle.fluid.layers.clip (ArgSpec(args=['x', 'min', 'max', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '4ad0d96a149f023cb72199ded4ce6e9d'))
paddle.fluid.layers.clip_by_norm (ArgSpec(args=['x', 'max_norm', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', 'a5f4917fda557ceb834168cdbec6d51b'))
paddle.fluid.layers.mean (ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,)), ('document', '597257fb94d0597c404a6a5c91ab5258'))
......@@ -349,7 +349,7 @@ paddle.fluid.layers.Switch.default (ArgSpec(args=['self'], varargs=None, keyword
paddle.fluid.layers.increment (ArgSpec(args=['x', 'value', 'in_place'], varargs=None, keywords=None, defaults=(1.0, True)), ('document', 'f88b5787bb80ae6b8bf513a70dabbdc1'))
paddle.fluid.layers.array_write (ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)), ('document', '3f913b5069ad40bd85d89b33e4aa5939'))
paddle.fluid.layers.create_array (ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None), ('document', '556de793fdf24d515f3fc91260e2c048'))
paddle.fluid.layers.less_than (ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords=None, defaults=(None, None)), ('document', '04af32422c3a3d8f6040aeb406c82768'))
paddle.fluid.layers.less_than (ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords=None, defaults=(None, None)), ('document', '329bdde01cba69463b08b8c13015560a'))
paddle.fluid.layers.less_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '04e5623dd39b4437b9b08e0ce11071ca'))
paddle.fluid.layers.greater_than (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '135352e24251238122bb7823dd4a49aa'))
paddle.fluid.layers.greater_equal (ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords=None, defaults=(None,)), ('document', '44bdacd11299d72c0a52d2181e7ae6ca'))
......
......@@ -361,9 +361,14 @@ $$out = \tanh^{-1}(x)$$
class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of LeakyRelu operator");
AddOutput("Out", "Output of LeakyRelu operator");
AddAttr<float>("alpha", "The small negative slope").SetDefault(0.02f);
AddInput("X",
"A LoDTensor or Tensor representing preactivation values. Must be "
"one of the following types: float32, float64.");
AddOutput(
"Out",
"A LoDTensor or Tensor with the same type and size as that of x.");
AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
.SetDefault(0.02f);
AddAttr<bool>("use_mkldnn",
"(bool, default false) Only used in mkldnn kernel")
.SetDefault(false);
......@@ -374,7 +379,7 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
AddComment(R"DOC(
LeakyRelu Activation Operator.
$out = \max(x, \alpha * x)$
$$out = \max(x, \alpha * x)$$
)DOC");
}
......
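For reference, the updated comment's formula $$out = \max(x, \alpha * x)$$ corresponds to this minimal NumPy sketch (an illustration for this doc change, not code from the commit):

.. code-block:: python

    import numpy as np

    def leaky_relu_ref(x, alpha=0.02):
        # Element-wise max(x, alpha * x): positive entries pass through,
        # negative entries are scaled by alpha.
        return np.maximum(x, alpha * x)

    x = np.array([[-1.0, 2.0], [3.0, -4.0]], dtype=np.float32)
    print(leaky_relu_ref(x, alpha=0.1))  # [[-0.1  2. ]  [ 3.  -0.4]]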
......@@ -23,18 +23,16 @@ class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
OpComment comment;
AddInput("X",
string::Sprintf("(LoDTensor) Left hand operand of %s operator",
AddInput("X", string::Sprintf("Left hand operand of %s operator. Must be "
"a LoDTensor or Tensor of type bool.",
comment.type));
AddInput("Y",
string::Sprintf("(LoDTensor) Right hand operand of %s operator",
AddInput("Y", string::Sprintf("Right hand operand of %s operator. Must be "
"a LoDTensor or Tensor of type bool.",
comment.type));
AddOutput("Out", string::Sprintf(
"(LoDTensor) n-dim bool tensor. Each element is %s",
comment.equation));
AddOutput("Out", string::Sprintf("n-dim bool LoDTensor or Tensor"));
AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X and Y, and returns the Out. X, Y and Out are N-dim boolean tensors.
It operates element-wise on X and Y, and returns Out. X, Y and Out are N-dim boolean LoDTensors or Tensors.
Each element of Out is calculated by %s
)DOC",
comment.type, comment.equation));
......@@ -46,14 +44,13 @@ class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
OpComment comment;
AddInput("X", string::Sprintf("(LoDTensor) Operand of %s operator",
AddInput("X", string::Sprintf("Operand of %s operator. Must be "
"a LoDTensor or Tensor of type bool.",
comment.type));
AddOutput("Out", string::Sprintf(
"(LoDTensor) n-dim bool tensor. Each element is %s",
comment.equation));
AddOutput("Out", string::Sprintf("n-dim bool LoDTensor or Tensor."));
AddComment(string::Sprintf(R"DOC(%s Operator
It operates element-wise on X, and returns the Out. X and Out are N-dim boolean tensors.
It operates element-wise on X, and returns Out. X and Out are N-dim boolean LoDTensors or Tensors.
Each element of Out is calculated by %s
)DOC",
comment.type, comment.equation));
......@@ -65,9 +62,9 @@ class BinaryLogicalOpInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
OpComment comment;
PADDLE_ENFORCE(context->HasInput("X"),
PADDLE_ENFORCE_EQ(context->HasInput("X"), true,
"Input(X) of %s operator must not be null", comment.type);
PADDLE_ENFORCE(context->HasInput("Y"),
PADDLE_ENFORCE_EQ(context->HasInput("Y"), true,
"Input(Y) of %s operator must not be null", comment.type);
auto dim_x = context->GetInputDim("X");
auto dim_y = context->GetInputDim("Y");
......@@ -92,7 +89,7 @@ class UnaryLogicalOpInferShape : public framework::InferShapeBase {
public:
void operator()(framework::InferShapeContext *context) const override {
OpComment comment;
PADDLE_ENFORCE(context->HasInput("X"),
PADDLE_ENFORCE_EQ(context->HasInput("X"), true,
"Input(X) of %s operator must not be null", comment.type);
context->SetOutputDim("Out", context->GetInputDim("X"));
context->ShareLoD("X", "Out");
......
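As a reference for the element-wise semantics these proto makers document, the following NumPy sketch (illustrative only, not part of the commit) mirrors the boolean behavior and the shape rules the InferShape classes enforce:

.. code-block:: python

    import numpy as np

    x = np.array([[True, False], [False, True]])
    y = np.array([[True, True], [False, False]])

    # Binary ops work element-wise on same-shaped bool inputs, so Out
    # keeps the input shape, as BinaryLogicalOpInferShape checks.
    print(np.logical_and(x, y))  # [[ True False]  [False False]]
    print(np.logical_or(x, y))   # [[ True  True]  [False  True]]
    print(np.logical_xor(x, y))  # [[False  True]  [False  True]]

    # The unary op copies X's shape to Out, as UnaryLogicalOpInferShape does.
    print(np.logical_not(x))     # [[False  True]  [ True False]]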
......@@ -1020,9 +1020,24 @@ def less_than(x, y, force_cpu=None, cond=None):
.. code-block:: python
import paddle.fluid as fluid
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], dtype='int64', value=5)
cond = fluid.layers.less_than(x=label, y=limit)
import numpy as np
# Graph Organizing
x = fluid.layers.data(name='x', shape=[2], dtype='float64')
y = fluid.layers.data(name='y', shape=[2], dtype='float64')
result = fluid.layers.less_than(x=x, y=y)
# The commented-out lines below show an alternative method:
# result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
# fluid.layers.less_than(x=x, y=y, cond=result)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result])
print(result_value) # [[True, False], [False, False]]
"""
helper = LayerHelper("less_than", **locals())
if cond is None:
......
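For a quick sanity check of the example output above, np.less is the element-wise counterpart of fluid.layers.less_than (this snippet is an illustration, not part of the commit):

.. code-block:: python

    import numpy as np

    x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
    y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
    print(np.less(x_i, y_i))  # [[ True False]  [False False]]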
......@@ -10524,20 +10524,31 @@ def log(x, name=None):
Out = \\ln(x)
Args:
x (Variable): Input tensor.
name (str|None, default None): A name for this layer If set None,
the layer will be named automatically.
x (Variable): Input LoDTensor or Tensor. Must be one of the following types: float32, float64.
name (str|None): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable: The natural log of the input tensor computed element-wise.
Variable: The natural log of the input LoDTensor or Tensor computed element-wise.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
output = fluid.layers.log(x)
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[1], dtype="float32")
res = fluid.layers.log(x)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1], [2]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[0.], [0.6931472]]
"""
helper = LayerHelper('log', **locals())
dtype = helper.input_dtype(input_param_name='x')
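The printed values in the example follow from ln(1) = 0 and ln(2) ≈ 0.6931472; a one-line NumPy check (illustrative only):

.. code-block:: python

    import numpy as np

    # Natural logarithm, element-wise, matching the example's feed data.
    print(np.log(np.array([[1.0], [2.0]], dtype=np.float32)))  # [[0.]  [0.6931472]]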
......@@ -11607,8 +11618,8 @@ def leaky_relu(x, alpha=0.02, name=None):
Args:
x(${x_type}): ${x_comment}
alpha(${alpha_type}|0.02): ${alpha_comment}
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
name(str|None): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
output(${out_type}): ${out_comment}
......@@ -11617,8 +11628,19 @@ def leaky_relu(x, alpha=0.02, name=None):
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name="x", shape=[2,3,16,16], dtype="float32")
y = fluid.layers.leaky_relu(x, alpha=0.01)
import numpy as np
# Graph Organizing
x = fluid.layers.data(name="x", shape=[2], dtype="float32")
res = fluid.layers.leaky_relu(x, alpha=0.1)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[-1, 2], [3, -4]]).astype(np.float32)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[-0.1, 2], [3, -0.4]]
"""
helper = LayerHelper('leaky_relu', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
......@@ -13510,26 +13532,46 @@ def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
@templatedoc()
def logical_and(x, y, out=None, name=None):
"""
${comment}
logical_and Operator
It operates element-wise on X and Y, and returns Out. X, Y and Out are N-dim boolean LoDTensors or Tensors.
Each element of Out is calculated by
.. math::
Out = X \land Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(Tensor): Output tensor of logical operation.
name(basestring|None): Name of the output.
out(LoDTensor or Tensor): The LoDTensor or Tensor that receives the output of the operator; it can be any Variable created in the program. The default value is None, in which case a new Variable is created to save the output.
name(str|None): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
out(${out_type}): ${out_comment}
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
left = fluid.layers.data(
name='left', shape=[1], dtype='bool')
right = fluid.layers.data(
name='right', shape=[1], dtype='bool')
result = fluid.layers.logical_and(x=left, y=right)
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_and(x=x, y=y)
# The commented-out lines below show an alternative method:
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_and(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, False], [False, False]]
"""
return _logical_op(
......@@ -13539,26 +13581,46 @@ def logical_and(x, y, out=None, name=None):
@templatedoc()
def logical_or(x, y, out=None, name=None):
"""
${comment}
logical_or Operator
It operates element-wise on X and Y, and returns Out. X, Y and Out are N-dim boolean LoDTensors or Tensors.
Each element of Out is calculated by
.. math::
Out = X \lor Y
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(Tensor): Output tensor of logical operation.
name(basestring|None): Name of the output.
out(LoDTensor or Tensor): The LoDTensor or Tensor that receives the output of the operator; it can be any Variable created in the program. The default value is None, in which case a new Variable is created to save the output.
name(str|None): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
out(${out_type}): ${out_comment}
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
left = fluid.layers.data(
name='left', shape=[1], dtype='bool')
right = fluid.layers.data(
name='right', shape=[1], dtype='bool')
result = fluid.layers.logical_or(x=left, y=right)
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_or(x=x, y=y)
# The commented-out lines below show an alternative method:
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_or(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[True, True], [False, True]]
"""
return _logical_op(
......@@ -13568,26 +13630,46 @@ def logical_or(x, y, out=None, name=None):
@templatedoc()
def logical_xor(x, y, out=None, name=None):
"""
${comment}
logical_xor Operator
It operates element-wise on X and Y, and returns Out. X, Y and Out are N-dim boolean LoDTensors or Tensors.
Each element of Out is calculated by
.. math::
Out = (X \lor Y) \land \lnot (X \land Y)
Args:
x(${x_type}): ${x_comment}
y(${y_type}): ${y_comment}
out(Tensor): Output tensor of logical operation.
name(basestring|None): Name of the output.
out(LoDTensor or Tensor): The LoDTensor or Tensor that receives the output of the operator; it can be any Variable created in the program. The default value is None, in which case a new Variable is created to save the output.
name(str|None): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
out(${out_type}): ${out_comment}
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
left = fluid.layers.data(
name='left', shape=[1], dtype='bool')
right = fluid.layers.data(
name='right', shape=[1], dtype='bool')
result = fluid.layers.logical_xor(x=left, y=right)
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
y = fluid.layers.data(name='y', shape=[2], dtype='bool')
res = fluid.layers.logical_xor(x=x, y=y)
# The commented-out lines below show an alternative method:
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_xor(x=x, y=y, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0], [0, 1]]).astype(np.bool)
y_i = np.array([[1, 1], [0, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[res])
print(res_val) # [[False, True], [False, True]]
"""
return _logical_op(
......@@ -13597,23 +13679,43 @@ def logical_xor(x, y, out=None, name=None):
@templatedoc()
def logical_not(x, out=None, name=None):
"""
${comment}
logical_not Operator
It operates element-wise on X, and returns Out. X and Out are N-dim boolean LoDTensors or Tensors.
Each element of Out is calculated by
.. math::
Out = \lnot X
Args:
x(${x_type}): ${x_comment}
out(Tensor): Output tensor of logical operation.
name(basestring|None): Name of the output.
out(LoDTensor or Tensor): The LoDTensor or Tensor that receives the output of the operator; it can be any Variable created in the program. The default value is None, in which case a new Variable is created to save the output.
name(str|None): The default value is None. Normally there is no need for the user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
out(${out_type}): ${out_comment}
${out_type}: ${out_comment}
Examples:
.. code-block:: python
import paddle.fluid as fluid
left = fluid.layers.data(
name='left', shape=[1], dtype='bool')
result = fluid.layers.logical_not(x=left)
import numpy as np
# Graph organizing
x = fluid.layers.data(name='x', shape=[2], dtype='bool')
res = fluid.layers.logical_not(x)
# The commented-out lines below show an alternative method:
# res = fluid.layers.fill_constant(shape=[2], dtype='bool', value=0)
# fluid.layers.logical_not(x, out=res)
# Create an executor using CPU as an example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 0]]).astype(np.bool)
res_val, = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res])
print(res_val) # [[False, True]]
"""
return _logical_op(
......
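The documented xor equation, $$Out = (X \lor Y) \land \lnot (X \land Y)$$, can be checked against a direct element-wise xor; this NumPy sketch (an illustration, not part of the commit) confirms the identity on the example data:

.. code-block:: python

    import numpy as np

    x = np.array([[True, False], [False, True]])
    y = np.array([[True, True], [False, False]])

    # Left side: (X or Y) and not(X and Y); right side: X xor Y.
    lhs = np.logical_and(np.logical_or(x, y),
                         np.logical_not(np.logical_and(x, y)))
    rhs = np.logical_xor(x, y)
    print(np.array_equal(lhs, rhs))  # True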