Commit 67248018 authored by sneaxiy

fix conflict

@@ -167,6 +167,9 @@ paddle.fluid.layers.stanh ArgSpec(args=['x', 'scale_a', 'scale_b', 'name'], vara
 paddle.fluid.layers.hard_sigmoid ArgSpec(args=['x', 'slope', 'offset', 'name'], varargs=None, keywords=None, defaults=(0.2, 0.5, None))
 paddle.fluid.layers.swish ArgSpec(args=['x', 'beta', 'name'], varargs=None, keywords=None, defaults=(1.0, None))
 paddle.fluid.layers.prelu ArgSpec(args=['x', 'mode', 'param_attr', 'name'], varargs=None, keywords=None, defaults=(None, None))
+paddle.fluid.layers.brelu ArgSpec(args=['x', 't_min', 't_max', 'name'], varargs=None, keywords=None, defaults=(0.0, 24.0, None))
+paddle.fluid.layers.leaky_relu ArgSpec(args=['x', 'alpha', 'name'], varargs=None, keywords=None, defaults=(0.02, None))
+paddle.fluid.layers.soft_relu ArgSpec(args=['x', 'threshold', 'name'], varargs=None, keywords=None, defaults=(40.0, None))
 paddle.fluid.layers.flatten ArgSpec(args=['x', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None))
 paddle.fluid.layers.sequence_mask ArgSpec(args=['x', 'maxlen', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 'int64', None))
 paddle.fluid.layers.stack ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,))
@@ -262,26 +265,23 @@ paddle.fluid.layers.sum ArgSpec(args=[], varargs='args', keywords='kwargs', defa
 paddle.fluid.layers.slice ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.shape ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.logsigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.exp ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.tanh ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.tanh_shrink ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
 paddle.fluid.layers.softshrink ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sqrt ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.abs ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.ceil ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.floor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.cos ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.sin ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.round ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.reciprocal ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.square ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.softplus ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.softsign ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.brelu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.leaky_relu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
-paddle.fluid.layers.soft_relu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None)
+paddle.fluid.layers.sigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.logsigmoid ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.exp ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.tanh ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.tanh_shrink ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.sqrt ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.abs ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.ceil ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.floor ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.cos ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.sin ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.round ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.reciprocal ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.square ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.softplus ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
+paddle.fluid.layers.softsign ArgSpec(args=['x', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.uniform_random ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=(None, None, None, None))
 paddle.fluid.layers.hard_shrink ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.cumsum ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None))
...
@@ -54,6 +54,10 @@ class CompileTimeInferShapeContext : public InferShapeContext {
                 size_t j = 0) const override {
     PADDLE_ENFORCE_LT(i, Inputs(in).size());
     PADDLE_ENFORCE_LT(j, Outputs(out).size());
+    PADDLE_ENFORCE(Inputs(in)[i] != framework::kEmptyVarName,
+                   "The %s[%d] is @EMPTY@", in, i);
+    PADDLE_ENFORCE(Outputs(out)[j] != framework::kEmptyVarName,
+                   "The %s[%d] is @EMPTY@", out, j);
     auto *in_var = block_.FindVarRecursive(Inputs(in)[i]);
     auto *out_var = block_.FindVarRecursive(Outputs(out)[j]);
     if (in_var->GetType() != proto::VarType::LOD_TENSOR) {
@@ -63,6 +67,7 @@ class CompileTimeInferShapeContext : public InferShapeContext {
     PADDLE_ENFORCE_EQ(in_var->GetType(), proto::VarType::LOD_TENSOR,
                       "The %d-th output of Output(%s) must be LoDTensor.", j,
                       out);
     out_var->SetLoDLevel(in_var->GetLoDLevel());
   }
...
@@ -46,6 +46,16 @@ std::vector<DDim> InferShapeContext::GetReaderDims(
   return this->GetRepeatedDims(arg_names[0]);
 }
 
+void InferShapeContext::ShareLoDs(const std::string &in,
+                                  const std::string &out) const {
+  PADDLE_ENFORCE_EQ(Inputs(in).size(), Outputs(out).size(),
+                    "The number of arguments in %s and %s is not equal.", in,
+                    out);
+  for (size_t i = 0; i < Inputs(in).size(); ++i) {
+    ShareLoD(in, out, i, i);
+  }
+}
+
 DDim InferShapeContext::GetInputsElementDim(const std::string &name,
                                             int idx) const {
   const std::vector<std::string> &names = Inputs(name);
...
@@ -56,6 +56,8 @@ class InferShapeContext {
   virtual const std::vector<std::string> &Outputs(
       const std::string &name) const = 0;
 
+  void ShareLoDs(const std::string &in, const std::string &out) const;
+
   virtual void ShareLoD(const std::string &in, const std::string &out,
                         size_t i = 0, size_t j = 0) const = 0;
...
@@ -94,8 +94,20 @@ class ConcatOpGrad : public framework::OperatorWithKernel {
       : OperatorWithKernel(type, inputs, outputs, attrs) {}
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    ctx->SetOutputsDim(framework::GradVarName("X"), ctx->GetInputsDim("X"));
-    ctx->ShareLoD("X", framework::GradVarName("X"));
+    auto in_x = "X";
+    auto out_x_g_n = framework::GradVarName(in_x);
+    ctx->SetOutputsDim(out_x_g_n, ctx->GetInputsDim(in_x));
+    auto &in_names = ctx->Inputs(in_x);
+    auto &out_names = ctx->Outputs(out_x_g_n);
+    PADDLE_ENFORCE_EQ(
+        in_names.size(), out_names.size(),
+        "The number of arguments in %s[%d] and %s[%d] is not equal.", in_x,
+        in_names.size(), out_x_g_n, out_names.size());
+    for (size_t i = 0; i < in_names.size(); ++i) {
+      if (out_names[i] != framework::kEmptyVarName) {
+        ctx->ShareLoD(in_x, out_x_g_n, i, i);
+      }
+    }
   }
 };
...
@@ -280,7 +280,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr):
         group_scale_name = self.group_name + "_scale"
         if group_scale_name not in self.context:
             group_norm_var = layers.sums(input=self.context[self.group_name])
-            layers.sqrt(x=group_norm_var, out=group_norm_var)
+            group_norm_var = layers.sqrt(x=group_norm_var)
             clip_var = self.context[self.group_name + "_clip"]
             group_scale_var = layers.elementwise_div(
                 x=clip_var,
...
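Note: the clipping change above follows from the activation rework below. `sqrt` is now generated by `generate_layer_fn_noattr` (introduced in the next file), whose wrappers take only `x` and `name` and return a freshly created output Variable, so the previous in-place `out=` call style no longer applies. A minimal sketch of the updated pattern (the `squared_grads` list here is illustrative, not part of this commit):

    group_norm_var = layers.sums(input=squared_grads)  # illustrative input list
    group_norm_var = layers.sqrt(x=group_norm_var)     # capture the return value; no out= argument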
@@ -23,7 +23,10 @@ from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable
 from ..layer_helper import LayerHelper
 
-__all__ = ['deprecated', 'generate_layer_fn', 'autodoc', 'templatedoc']
+__all__ = [
+    'deprecated', 'generate_layer_fn', 'generate_layer_fn_noattr', 'autodoc',
+    'templatedoc'
+]
 
 
 def _convert_(name):
@@ -212,6 +215,29 @@ def generate_layer_fn(op_type):
     return func
 
 
+def generate_layer_fn_noattr(op_type):
+    """Register the Python layer for an Operator without Attribute.
+
+    Args:
+       op_type: The name of the operator to be created.
+
+    This function takes in the operator type (sigmoid, exp , tanh etc) and
+    creates the operator functionality.
+
+    """
+    op_proto = OpProtoHolder.instance().get_op_proto(op_type)
+
+    def func(x, name=None):
+        helper = LayerHelper(op_type, **locals())
+        output = helper.create_tmp_variable(dtype=x.dtype)
+        helper.append_op(type=op_type, inputs={"X": x}, outputs={"Out": output})
+        return output
+
+    func.__name__ = op_type
+    func.__doc__ = _generate_doc_string_(op_proto)
+    return func
+
+
 def deprecated(func_or_class):
     """
     Deprecated warning decorator. It will result a warning message.
...
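The generated no-attribute wrapper simply feeds `x` into the operator's "X" input and returns its "Out" output, so every activation registered this way becomes a plain `f(x, name=None)` call. A minimal usage sketch under the Fluid API of this branch (tensor name and shape are illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[16], dtype='float32')
    y = fluid.layers.tanh(x)                  # generated by generate_layer_fn_noattr
    z = fluid.layers.sigmoid(y, name='prob')  # optional name, no operator attributes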
@@ -114,6 +114,9 @@ __all__ = [
     'hard_sigmoid',
     'swish',
     'prelu',
+    'brelu',
+    'leaky_relu',
+    'soft_relu',
     'flatten',
     'sequence_mask',
     'stack',
@@ -6104,6 +6107,74 @@ def prelu(x, mode, param_attr=None, name=None):
     return out
 
 
+@templatedoc()
+def brelu(x, t_min=0.0, t_max=24.0, name=None):
+    """
+    ${comment}
+    Args:
+        x(${x_type}): ${x_comment}
+        t_min(${t_min_type}|0.0): ${t_min_comment}
+        t_max(${t_max_type}|24.0): ${t_max_comment}
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+    Returns:
+        output(${out_type}): ${out_comment}
+    """
+    helper = LayerHelper('brelu', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='brelu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'t_min': t_min,
+               't_max': t_max})
+    return out
+
+
+@templatedoc()
+def leaky_relu(x, alpha=0.02, name=None):
+    """
+    ${comment}
+    Args:
+        x(${x_type}): ${x_comment}
+        alpha(${alpha_type}|0.02): ${alpha_comment}
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+    Returns:
+        output(${out_type}): ${out_comment}
+    """
+    helper = LayerHelper('leaky_relu', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='leaky_relu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'alpha': alpha})
+    return out
+
+
+@templatedoc()
+def soft_relu(x, threshold=40.0, name=None):
+    """
+    ${comment}
+    Args:
+        x(${x_type}): ${x_comment}
+        threshold(${threshold_type}|40.0): ${threshold_comment}
+        name(str|None): A name for this layer(optional). If set None, the layer
+                        will be named automatically.
+    Returns:
+        output(${out_type}): ${out_comment}
+    """
+    helper = LayerHelper('soft_relu', **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='soft_relu',
+        inputs={'X': x},
+        outputs={'Out': out},
+        attrs={'threshold': threshold})
+    return out
+
+
 def flatten(x, axis=1, name=None):
     """
     **Flatten layer**
...
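The three new layers expose the existing brelu, leaky_relu, and soft_relu operators with their attributes as keyword arguments. A short usage sketch mirroring the unit tests added further down (the input shape is illustrative):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[16], dtype='float32')
    y1 = fluid.layers.brelu(x, t_min=1.0, t_max=20.0)
    y2 = fluid.layers.leaky_relu(x, alpha=0.1)
    y3 = fluid.layers.soft_relu(x, threshold=30.0)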
@@ -13,15 +13,14 @@
 # limitations under the License.
 
 from __future__ import print_function
-from .layer_function_generator import generate_layer_fn
+from .layer_function_generator import generate_layer_fn, generate_layer_fn_noattr
 
-__activations__ = [
+__activations_noattr__ = [
     'sigmoid',
     'logsigmoid',
     'exp',
     'tanh',
     'tanh_shrink',
-    'softshrink',
     'sqrt',
     'abs',
     'ceil',
@@ -33,9 +32,6 @@ __activations__ = [
     'square',
     'softplus',
     'softsign',
-    'brelu',
-    'leaky_relu',
-    'soft_relu',
 ]
 
 __all__ = [
@@ -56,7 +52,8 @@ __all__ = [
     'slice',
     'shape',
     'maxout',
-] + __activations__
+    'softshrink',
+]
 
 for _OP in set(__all__):
     globals()[_OP] = generate_layer_fn(_OP)
@@ -66,6 +63,11 @@ for _OP in set(__all__):
 # e.g.: test_program_code.py, test_dist_train.py
 globals()['_scale'] = generate_layer_fn('scale')
 
+__all__ += __activations_noattr__
+
+for _OP in set(__activations_noattr__):
+    globals()[_OP] = generate_layer_fn_noattr(_OP)
+
 __all__ += ["uniform_random"]
 
 _uniform_random_ = generate_layer_fn('uniform_random')
...
@@ -573,6 +573,158 @@ class TestBook(unittest.TestCase):
             self.assertIsNotNone(out)
         print(str(program))
 
+    def test_brelu(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.brelu(input, t_min=1.0, t_max=20.0, name='brelu')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_leaky_relu(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.leaky_relu(input, alpha=0.1, name='leaky_relu')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_soft_relu(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.soft_relu(input, threshold=30.0, name='soft_relu')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sigmoid(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.sigmoid(input, name='sigmoid')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_logsigmoid(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.logsigmoid(input, name='logsigmoid')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_exp(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.exp(input, name='exp')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_tanh(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.tanh(input, name='tanh')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_tanh_shrink(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.tanh_shrink(input, name='tanh_shrink')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sqrt(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.sqrt(input, name='sqrt')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_abs(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.abs(input, name='abs')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_ceil(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.ceil(input, name='ceil')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_floor(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.floor(input, name='floor')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_cos(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.cos(input, name='cos')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_sin(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.sin(input, name='sin')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_round(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.round(input, name='round')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_reciprocal(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.reciprocal(input, name='reciprocal')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_square(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.square(input, name='square')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_softplus(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.softplus(input, name='softplus')
+            self.assertIsNotNone(out)
+        print(str(program))
+
+    def test_softsign(self):
+        program = Program()
+        with program_guard(program):
+            input = layers.data(name="input", shape=[16], dtype="float32")
+            out = layers.softsign(input, name='softsign')
+            self.assertIsNotNone(out)
+        print(str(program))
+
     def test_roi_perspective_transform(self):
         program = Program()
         with program_guard(program):
...