diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index 590d76ae1708f3dc17f048f3b1bf005f834d0bed..c40b8db6948cf3c07aa7c0b41eef0bfa576e8e6e 100644
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -1345,7 +1345,7 @@ def append_backward(loss,
             x = paddle.static.data(name='x', shape=[None, 13], dtype='int64')
             y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
             x_emb = paddle.static.nn.embedding(x, size=[100, 256])
-            y_predict = paddle.static.nn.fc(input=x_emb, size=1, act=None, name='my_fc')
+            y_predict = paddle.static.nn.fc(x=x_emb, size=1, activation=None, name='my_fc')
             loss = F.square_error_cost(input=y_predict, label=y)
             avg_loss = paddle.mean(loss)
diff --git a/python/paddle/fluid/compiler.py b/python/paddle/fluid/compiler.py
index 31cacf075b7eff4b3330d064b2172c77ffb5b0b3..0b980c7ebab58210785db3f4f1fe5f746eb8435a 100644
--- a/python/paddle/fluid/compiler.py
+++ b/python/paddle/fluid/compiler.py
@@ -120,7 +120,7 @@ class CompiledProgram(object):
             exe = static.Executor(place)

             data = static.data(name='X', shape=[None, 1], dtype='float32')
-            hidden = static.nn.fc(input=data, size=10)
+            hidden = static.nn.fc(x=data, size=10)
             loss = paddle.mean(hidden)
             paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
@@ -243,7 +243,7 @@ class CompiledProgram(object):
             exe = static.Executor(place)

             data = static.data(name='X', shape=[None, 1], dtype='float32')
-            hidden = static.nn.fc(input=data, size=10)
+            hidden = static.nn.fc(x=data, size=10)
             loss = paddle.mean(hidden)
             test_program = static.default_main_program().clone(for_test=True)
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 939665cb16ae1faa209d44c2639526081d6bf9c8..dbf70e286c24d98760fe9110bc8adcf90c49d1ae 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -3996,7 +3996,7 @@ class Program(object):
             with static.program_guard(main_program=main_program, startup_program=startup_program):
                 x = static.data(name="x", shape=[-1, 784], dtype='float32')
                 y = static.data(name="y", shape=[-1, 1], dtype='int32')
-                z = static.nn.fc(name="fc", input=x, size=10, act="relu")
+                z = static.nn.fc(name="fc", x=x, size=10, activation="relu")
                 print("main program is: {}".format(main_program))
                 print("start up program is: {}".format(startup_program))
@@ -4344,7 +4344,7 @@ class Program(object):
             paddle.enable_static()

             img = static.data(name='image', shape=[None, 784])
-            pred = static.nn.fc(input=img, size=10, act='relu')
+            pred = static.nn.fc(x=img, size=10, activation='relu')
             loss = paddle.mean(pred)
             # Here we use clone before Momentum
             test_program = static.default_main_program().clone(for_test=True)
@@ -4415,10 +4415,10 @@ class Program(object):
             with static.program_guard(train_program, startup_program):
                 with utils.unique_name.guard():
                     img = static.data(name='image', shape=[None, 784])
-                    hidden = static.nn.fc(input=img, size=200, act='relu')
+                    hidden = static.nn.fc(x=img, size=200, activation='relu')
                     hidden = F.dropout(hidden, p=0.5)
                     loss = F.cross_entropy(
-                        input=static.nn.fc(hidden, size=10, act='softmax'),
+                        input=static.nn.fc(x=hidden, size=10, activation='softmax'),
                         label=static.data(name='label', shape=[1], dtype='int64'))
                     avg_loss = paddle.mean(loss)
             test_program = train_program.clone(for_test=True)
@@ -4462,10 +4462,10 @@ class Program(object):
             def network():
                 img = static.data(name='image', shape=[None, 784])
-                hidden = static.nn.fc(input=img, size=200, act='relu')
+                hidden = static.nn.fc(x=img, size=200, activation='relu')
                 hidden = F.dropout(hidden, p=0.5)
                 loss = F.cross_entropy(
-                    input=static.nn.fc(hidden, size=10, act='softmax'),
+                    input=static.nn.fc(x=hidden, size=10, activation='softmax'),
                     label=static.data(name='label', shape=[1], dtype='int64'))
                 avg_loss = paddle.mean(loss)
                 return avg_loss
@@ -5079,7 +5079,7 @@ class Program(object):
             program = static.default_main_program()

             data = static.data(name='x', shape=[None, 13], dtype='float32')
-            hidden = static.nn.fc(input=data, size=10)
+            hidden = static.nn.fc(x=data, size=10)
             loss = paddle.mean(hidden)
             paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
@@ -5347,7 +5347,7 @@ def default_startup_program():
            with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):
                x = paddle.data(name="x", shape=[-1, 784], dtype='float32')
                y = paddle.data(name="y", shape=[-1, 1], dtype='int32')
-                z = paddle.static.nn.fc(name="fc", input=x, size=10, act="relu")
+                z = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")

                print("main program is: {}".format(paddle.static.default_main_program()))
                print("start up program is: {}".format(paddle.static.default_startup_program()))
@@ -5389,8 +5389,8 @@ def default_main_program():
            bn2 = paddle.static.nn.batch_norm(conv2, act='relu')
            pool2 = paddle.nn.functional.pool2d(bn2, 2, 'max', 2)

-            fc1 = paddle.static.nn.fc(pool2, size=50, act='relu')
-            fc2 = paddle.static.nn.fc(fc1, size=102, act='softmax')
+            fc1 = paddle.static.nn.fc(x=pool2, size=50, activation='relu')
+            fc2 = paddle.static.nn.fc(x=fc1, size=102, activation='softmax')

            loss = paddle.nn.functional.loss.cross_entropy(input=fc2, label=label)
            loss = paddle.mean(loss)
@@ -5467,7 +5467,7 @@ def program_guard(main_program, startup_program=None):
            startup_program = paddle.static.Program()
            with paddle.static.program_guard(main_program, startup_program):
                data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
-                hidden = paddle.static.nn.fc(input=data, size=10, act='relu')
+                hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')

    Notes: The temporary :code:`Program` can be used if the user does not need
        to construct either of startup program or main program.
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index dcfead697b94f2ae24ad70a76e763d30d01e7d09..83e282920d02a2e0d9f6f1e5b66a7e6d00b63a9c 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -13598,7 +13598,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
                 # User-defined debug functions that print out the input Tensor
                 paddle.static.nn.py_func(func=debug_func, x=hidden, out=None)
-            prediction = paddle.static.nn.fc(hidden, size=10, act='softmax')
+            prediction = paddle.static.nn.fc(x=hidden, size=10, activation='softmax')
             loss = paddle.static.nn.cross_entropy(input=prediction, label=label)
             return paddle.mean(loss)
diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py
index bf042393706938a61dc74e8af243467cdb99f6dc..4105d5c1a4e49821e7da3423271e2b67b030936b 100644
--- a/python/paddle/fluid/param_attr.py
+++ b/python/paddle/fluid/param_attr.py
@@ -264,17 +264,17 @@ class WeightNormParamAttr(ParamAttr):
             data = paddle.static.data(name="data", shape=[3, 32, 32], dtype="float32")
-            fc = paddle.static.nn.fc(input=data,
+            fc = paddle.static.nn.fc(x=data,
                                      size=1000,
-                                     param_attr=paddle.static.WeightNormParamAttr(
-                                         dim=None,
-                                         name='weight_norm_param',
-                                         initializer=paddle.nn.initializer.Constant(1.0),
-                                         learning_rate=1.0,
-                                         regularizer=paddle.regularizer.L2Decay(0.1),
-                                         trainable=True,
-                                         do_model_average=False,
-                                         need_clip=True))
+                                     weight_attr=paddle.static.WeightNormParamAttr(
+                                         dim=None,
+                                         name='weight_norm_param',
+                                         initializer=paddle.nn.initializer.Constant(1.0),
+                                         learning_rate=1.0,
+                                         regularizer=paddle.regularizer.L2Decay(0.1),
+                                         trainable=True,
+                                         do_model_average=False,
+                                         need_clip=True))
    """

    # List to record the parameters reparameterized by weight normalization.
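
As a quick reference for review, here is a minimal runnable sketch of the new-style paddle.static.nn.fc call these docstrings migrate to: x= replaces input= and activation= replaces act=. The layer sizes and variable names are illustrative only, not taken from the patch.

    import paddle
    import paddle.static as static

    paddle.enable_static()

    # New spelling: fc(x=..., activation=...) instead of fc(input=..., act=...)
    data = static.data(name='data', shape=[None, 784], dtype='float32')
    hidden = static.nn.fc(x=data, size=200, activation='relu')
    pred = static.nn.fc(x=hidden, size=10, activation='softmax')

    # Run the startup program once so the fc parameters are initialized.
    exe = static.Executor(paddle.CPUPlace())
    exe.run(static.default_startup_program())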
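The param_attr.py hunk additionally renames the keyword that carries WeightNormParamAttr from param_attr= to weight_attr=. A minimal sketch under the new spelling, trimmed from the docstring example above (the input shape and fc size are illustrative):

    import paddle
    import paddle.static as static

    paddle.enable_static()

    data = static.data(name='data', shape=[None, 32], dtype='float32')
    # weight_attr= is the new keyword; param_attr= is the old one being removed.
    fc = static.nn.fc(
        x=data,
        size=10,
        weight_attr=static.WeightNormParamAttr(
            dim=None,
            name='weight_norm_param',
            initializer=paddle.nn.initializer.Constant(1.0)))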