BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit b301adc9 (unverified)
Authored by Yiqun Liu on Oct 14, 2020
Committed by GitHub on Oct 14, 2020
Update all the examples which use paddle.static.nn.fc. (#27904)
Parent 41aad9bf
Showing 5 changed files with 25 additions and 25 deletions (+25, -25)
python/paddle/fluid/backward.py    +1   -1
python/paddle/fluid/compiler.py    +2   -2
python/paddle/fluid/framework.py   +11  -11
python/paddle/fluid/layers/nn.py   +1   -1
python/paddle/fluid/param_attr.py  +10  -10
python/paddle/fluid/backward.py
@@ -1345,7 +1345,7 @@ def append_backward(loss,
 x = paddle.static.data(name='x', shape=[None, 13], dtype='int64')
 y = paddle.static.data(name='y', shape=[None, 1], dtype='float32')
 x_emb = paddle.static.nn.embedding(x, size=[100, 256])
-y_predict = paddle.static.nn.fc(input=x_emb, size=1, act=None, name='my_fc')
+y_predict = paddle.static.nn.fc(x=x_emb, size=1, activation=None, name='my_fc')
 loss = F.square_error_cost(input=y_predict, label=y)
 avg_loss = paddle.mean(loss)
python/paddle/fluid/compiler.py
@@ -120,7 +120,7 @@ class CompiledProgram(object):
 exe = static.Executor(place)
 data = static.data(name='X', shape=[None, 1], dtype='float32')
-hidden = static.nn.fc(input=data, size=10)
+hidden = static.nn.fc(x=data, size=10)
 loss = paddle.mean(hidden)
 paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
@@ -243,7 +243,7 @@ class CompiledProgram(object):
 exe = static.Executor(place)
 data = static.data(name='X', shape=[None, 1], dtype='float32')
-hidden = static.nn.fc(input=data, size=10)
+hidden = static.nn.fc(x=data, size=10)
 loss = paddle.mean(hidden)
 test_program = static.default_main_program().clone(for_test=True)
python/paddle/fluid/framework.py
@@ -3996,7 +3996,7 @@ class Program(object):
 with static.program_guard(main_program=main_program, startup_program=startup_program):
     x = static.data(name="x", shape=[-1, 784], dtype='float32')
     y = static.data(name="y", shape=[-1, 1], dtype='int32')
-    z = static.nn.fc(name="fc", input=x, size=10, act="relu")
+    z = static.nn.fc(name="fc", x=x, size=10, activation="relu")
 print("main program is: {}".format(main_program))
 print("start up program is: {}".format(startup_program))
@@ -4344,7 +4344,7 @@ class Program(object):
 paddle.enable_static()
 img = static.data(name='image', shape=[None, 784])
-pred = static.nn.fc(input=img, size=10, act='relu')
+pred = static.nn.fc(x=img, size=10, activation='relu')
 loss = paddle.mean(pred)
 # Here we use clone before Momentum
 test_program = static.default_main_program().clone(for_test=True)
@@ -4415,10 +4415,10 @@
 with static.program_guard(train_program, startup_program):
     with utils.unique_name.guard():
         img = static.data(name='image', shape=[None, 784])
-        hidden = static.nn.fc(input=img, size=200, act='relu')
+        hidden = static.nn.fc(x=img, size=200, activation='relu')
         hidden = F.dropout(hidden, p=0.5)
         loss = F.cross_entropy(
-            input=static.nn.fc(hidden, size=10, act='softmax'),
+            input=static.nn.fc(x=hidden, size=10, activation='softmax'),
             label=static.data(name='label', shape=[1], dtype='int64'))
         avg_loss = paddle.mean(loss)
 test_program = train_program.clone(for_test=True)
@@ -4462,10 +4462,10 @@
 def network():
     img = static.data(name='image', shape=[None, 784])
-    hidden = static.nn.fc(input=img, size=200, act='relu')
+    hidden = static.nn.fc(x=img, size=200, activation='relu')
     hidden = F.dropout(hidden, p=0.5)
     loss = F.cross_entropy(
-        input=static.nn.fc(hidden, size=10, act='softmax'),
+        input=static.nn.fc(x=hidden, size=10, activation='softmax'),
         label=static.data(name='label', shape=[1], dtype='int64'))
     avg_loss = paddle.mean(loss)
     return avg_loss
@@ -5079,7 +5079,7 @@ class Program(object):
 program = static.default_main_program()
 data = static.data(name='x', shape=[None, 13], dtype='float32')
-hidden = static.nn.fc(input=data, size=10)
+hidden = static.nn.fc(x=data, size=10)
 loss = paddle.mean(hidden)
 paddle.optimizer.SGD(learning_rate=0.01).minimize(loss)
@@ -5347,7 +5347,7 @@ def default_startup_program():
 with paddle.static.program_guard(main_program=main_program, startup_program=startup_program):
     x = paddle.data(name="x", shape=[-1, 784], dtype='float32')
     y = paddle.data(name="y", shape=[-1, 1], dtype='int32')
-    z = paddle.static.nn.fc(name="fc", input=x, size=10, act="relu")
+    z = paddle.static.nn.fc(name="fc", x=x, size=10, activation="relu")
 print("main program is: {}".format(paddle.static.default_main_program()))
 print("start up program is: {}".format(paddle.static.default_startup_program()))
@@ -5389,8 +5389,8 @@ def default_main_program():
 bn2 = paddle.static.nn.batch_norm(conv2, act='relu')
 pool2 = paddle.nn.functional.pool2d(bn2, 2, 'max', 2)
-fc1 = paddle.static.nn.fc(pool2, size=50, act='relu')
-fc2 = paddle.static.nn.fc(fc1, size=102, act='softmax')
+fc1 = paddle.static.nn.fc(x=pool2, size=50, activation='relu')
+fc2 = paddle.static.nn.fc(x=fc1, size=102, activation='softmax')
 loss = paddle.nn.functional.loss.cross_entropy(input=fc2, label=label)
 loss = paddle.mean(loss)
@@ -5467,7 +5467,7 @@ def program_guard(main_program, startup_program=None):
 startup_program = paddle.static.Program()
 with paddle.static.program_guard(main_program, startup_program):
     data = paddle.static.data(name='image', shape=[None, 784, 784], dtype='float32')
-    hidden = paddle.static.nn.fc(input=data, size=10, act='relu')
+    hidden = paddle.static.nn.fc(x=data, size=10, activation='relu')
 Notes: The temporary :code:`Program` can be used if the user does not need
 to construct either of startup program or main program.
python/paddle/fluid/layers/nn.py
@@ -13598,7 +13598,7 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
 # User-defined debug functions that print out the input Tensor
 paddle.static.nn.py_func(func=debug_func, x=hidden, out=None)
-prediction = paddle.static.nn.fc(hidden, size=10, act='softmax')
+prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
 loss = paddle.static.nn.cross_entropy(input=prediction, label=label)
 return paddle.mean(loss)
python/paddle/fluid/param_attr.py
@@ -264,17 +264,17 @@ class WeightNormParamAttr(ParamAttr):
 data = paddle.static.data(name="data", shape=[3, 32, 32], dtype="float32")
-fc = paddle.static.nn.fc(input=data,
+fc = paddle.static.nn.fc(x=data,
                          size=1000,
-                         param_attr=paddle.static.WeightNormParamAttr(
-                             dim=None,
-                             name='weight_norm_param',
-                             initializer=paddle.nn.initializer.Constant(1.0),
-                             learning_rate=1.0,
-                             regularizer=paddle.regularizer.L2Decay(0.1),
-                             trainable=True,
-                             do_model_average=False,
-                             need_clip=True))
+                         weight_attr=paddle.static.WeightNormParamAttr(
+                             dim=None,
+                             name='weight_norm_param',
+                             initializer=paddle.nn.initializer.Constant(1.0),
+                             learning_rate=1.0,
+                             regularizer=paddle.regularizer.L2Decay(0.1),
+                             trainable=True,
+                             do_model_average=False,
+                             need_clip=True))
 """
 # List to record the parameters reparameterized by weight normalization.