Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
BaiXuePrincess
Paddle
提交
8388abe6
P
Paddle
项目概览
BaiXuePrincess
/
Paddle
与 Fork 源项目一致
Fork自
PaddlePaddle / Paddle
通知
1
Star
1
Fork
0
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
0
列表
看板
标记
里程碑
合并请求
0
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
P
Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
0
Issue
0
列表
看板
标记
里程碑
合并请求
0
合并请求
0
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
未验证
提交
8388abe6
编写于
11月 29, 2020
作者:
Z
zhang wenhui
提交者:
GitHub
11月 29, 2020
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
Fix api 1128 (#29174)
* fix 2.0 api, test=develop * fix api, test=develop
上级
f92fdfb8
变更
3
隐藏空白更改
内联
并排
Showing
3 changed files
with
13 additions
and
23 deletions
+13
-23
python/paddle/nn/functional/norm.py
python/paddle/nn/functional/norm.py
+3
-6
python/paddle/nn/layer/norm.py
python/paddle/nn/layer/norm.py
+8
-14
python/paddle/optimizer/adagrad.py
python/paddle/optimizer/adagrad.py
+2
-3
未找到文件。
python/paddle/nn/functional/norm.py
浏览文件 @
8388abe6
...
...
@@ -150,7 +150,6 @@ def batch_norm(x,
import paddle
import numpy as np
paddle.disable_static()
x = np.random.seed(123)
x = np.random.random(size=(2, 1, 2, 3)).astype('float32')
running_mean = np.random.random(size=1).astype('float32')
...
...
@@ -163,7 +162,7 @@ def batch_norm(x,
w = paddle.to_tensor(weight_data)
b = paddle.to_tensor(bias_data)
batch_norm_out = paddle.nn.functional.batch_norm(x, rm, rv, w, b)
print(batch_norm_out
.numpy()
)
print(batch_norm_out)
"""
assert
len
(
x
.
shape
)
>=
2
,
"input dim must be larger than 1"
...
...
@@ -269,14 +268,13 @@ def layer_norm(x,
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
layer_norm = paddle.nn.functional.layer_norm(x, x.shape[1:])
layer_norm_out = layer_norm(x)
print(layer_norm_out
.numpy()
)
print(layer_norm_out)
"""
input_shape
=
list
(
x
.
shape
)
input_ndim
=
len
(
input_shape
)
...
...
@@ -362,13 +360,12 @@ def instance_norm(x,
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm_out = paddle.nn.functional.instancenorm(x)
print(instance_norm_out
.numpy()
)
print(instance_norm_out)
"""
...
...
python/paddle/nn/layer/norm.py
浏览文件 @
8388abe6
...
...
@@ -163,14 +163,13 @@ class InstanceNorm1D(_InstanceNormBase):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm1D(2)
instance_norm_out = instance_norm(x)
print(instance_norm_out
.numpy()
)
print(instance_norm_out)
"""
...
...
@@ -235,14 +234,13 @@ class InstanceNorm2D(_InstanceNormBase):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm2D(2)
instance_norm_out = instance_norm(x)
print(instance_norm_out
.numpy()
)
print(instance_norm_out)
"""
def
_check_input_dim
(
self
,
input
):
...
...
@@ -306,14 +304,13 @@ class InstanceNorm3D(_InstanceNormBase):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
instance_norm = paddle.nn.InstanceNorm3D(2)
instance_norm_out = instance_norm(x)
print(instance_norm_out.numpy
()
)
print(instance_norm_out.numpy)
"""
def
_check_input_dim
(
self
,
input
):
...
...
@@ -352,6 +349,7 @@ class GroupNorm(layers.Layer):
Examples:
.. code-block:: python
import paddle
import numpy as np
...
...
@@ -492,14 +490,13 @@ class LayerNorm(layers.Layer):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
layer_norm = paddle.nn.LayerNorm(x_data.shape[1:])
layer_norm_out = layer_norm(x)
print(layer_norm_out
.numpy()
)
print(layer_norm_out)
"""
def
__init__
(
self
,
...
...
@@ -714,14 +711,13 @@ class BatchNorm1D(_BatchNormBase):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 1, 3)).astype('float32')
x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm1D(1)
batch_norm_out = batch_norm(x)
print(batch_norm_out
.numpy()
)
print(batch_norm_out)
"""
def
_check_data_format
(
self
,
input
):
...
...
@@ -804,14 +800,13 @@ class BatchNorm2D(_BatchNormBase):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm2D(1)
batch_norm_out = batch_norm(x)
print(batch_norm_out
.numpy()
)
print(batch_norm_out)
"""
def
_check_data_format
(
self
,
input
):
...
...
@@ -893,14 +888,13 @@ class BatchNorm3D(_BatchNormBase):
import paddle
import numpy as np
paddle.disable_static()
np.random.seed(123)
x_data = np.random.random(size=(2, 1, 2, 2, 3)).astype('float32')
x = paddle.to_tensor(x_data)
batch_norm = paddle.nn.BatchNorm3D(1)
batch_norm_out = batch_norm(x)
print(batch_norm_out
.numpy()
)
print(batch_norm_out)
"""
def
_check_data_format
(
self
,
input
):
...
...
python/paddle/optimizer/adagrad.py
浏览文件 @
8388abe6
...
...
@@ -50,8 +50,8 @@ class Adagrad(Optimizer):
The default value is None in static mode, at this time all parameters will be updated.
weight_decay (float|WeightDecayRegularizer, optional): The strategy of regularization. \
It can be a float value as coeff of L2 regularization or \
:ref:`api_fluid_regularizer_L1Decay`, :ref:`api_fluid_regularizer_L2Decay`.
If a parameter has set regularizer using :ref:`api_fluid_ParamAttr` already, \
:ref:`api_paddle_regularizer_L1Decay`, :ref:`api_paddle_regularizer_L2Decay`.
If a parameter has set regularizer using :ref:`api_paddle_fluid_param_attr_ParamAttr` already, \
the regularization setting here in optimizer will be ignored for this parameter. \
Otherwise, the regularization setting here in optimizer will take effect. \
Default None, meaning there is no regularization.
...
...
@@ -71,7 +71,6 @@ class Adagrad(Optimizer):
import paddle
import numpy as np
paddle.disable_static()
inp = paddle.rand(shape=[10, 10])
linear = paddle.nn.Linear(10, 10)
out = linear(inp)
...
...
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录