PaddlePaddle / Paddle
Commit f05098b5 (unverified)
Authored Jul 15, 2021 by wanghuancoder; committed via GitHub on Jul 15, 2021
cache core.ops (#34058)
* cache core.ops, test=develop
* refine, test=develop
Parent: 2850391d
Showing 51 changed files with 578 additions and 516 deletions (+578, -516)
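The change is mechanical and tree-wide: the new python/paddle/_C_ops.py module copies every operator exposed by `core.ops` into its own module globals once at import, and every dygraph call site below is rewritten from `core.ops.<op>(...)` to `_C_ops.<op>(...)`, replacing a per-call attribute lookup on the pybind module with a plain module-global lookup. A self-contained sketch of the caching idiom, using the standard `math` module as a stand-in for `core.ops` (the stand-in and file name are ours, purely illustrative):

# cache_sketch.py: mirror a module's public attributes into this module's
# globals once, so later calls skip attribute lookup on the source module.
import math  # stands in for paddle.fluid.core.ops

__all__ = []

for name in dir(math):
    if not name.startswith('_'):
        globals()[name] = getattr(math, name)  # resolve once, cache the result
        __all__.append(name)

# sqrt is now a global of this file; the call below never touches math again.
print(sqrt(2.0))  # 1.4142135623730951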
python/paddle/_C_ops.py  +21 -0
python/paddle/distributed/collective.py  +52 -52
python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py  +3 -2
python/paddle/distribution.py  +11 -10
python/paddle/fluid/contrib/layers/nn.py  +3 -2
python/paddle/fluid/contrib/optimizer.py  +2 -1
python/paddle/fluid/dygraph/amp/loss_scaler.py  +3 -2
python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py  +2 -1
python/paddle/fluid/dygraph/inplace_utils.py  +2 -1
python/paddle/fluid/dygraph/math_op_patch.py  +6 -5
python/paddle/fluid/dygraph/nn.py  +14 -13
python/paddle/fluid/dygraph_utils.py  +4 -3
python/paddle/fluid/layers/control_flow.py  +2 -1
python/paddle/fluid/layers/detection.py  +4 -3
python/paddle/fluid/layers/layer_function_generator.py  +3 -2
python/paddle/fluid/layers/loss.py  +10 -9
python/paddle/fluid/layers/metric_op.py  +3 -2
python/paddle/fluid/layers/nn.py  +60 -62
python/paddle/fluid/layers/tensor.py  +16 -15
python/paddle/fluid/optimizer.py  +10 -9
python/paddle/fluid/regularizer.py  +2 -1
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py  +1 -1
python/paddle/incubate/optimizer/modelaverage.py  +2 -1
python/paddle/metric/metrics.py  +3 -2
python/paddle/nn/functional/activation.py  +28 -27
python/paddle/nn/functional/common.py  +14 -14
python/paddle/nn/functional/conv.py  +6 -5
python/paddle/nn/functional/input.py  +4 -3
python/paddle/nn/functional/loss.py  +71 -70
python/paddle/nn/functional/norm.py  +10 -9
python/paddle/nn/functional/pooling.py  +27 -25
python/paddle/nn/functional/vision.py  +7 -6
python/paddle/nn/layer/distance.py  +4 -3
python/paddle/nn/layer/norm.py  +2 -1
python/paddle/nn/layer/rnn.py  +2 -2
python/paddle/nn/quant/quant_layers.py  +6 -5
python/paddle/optimizer/adam.py  +2 -1
python/paddle/optimizer/lamb.py  +2 -1
python/paddle/optimizer/momentum.py  +2 -1
python/paddle/optimizer/optimizer.py  +2 -1
python/paddle/optimizer/sgd.py  +3 -2
python/paddle/tensor/attribute.py  +3 -2
python/paddle/tensor/creation.py  +17 -16
python/paddle/tensor/linalg.py  +16 -15
python/paddle/tensor/logic.py  +12 -11
python/paddle/tensor/manipulation.py  +20 -19
python/paddle/tensor/math.py  +37 -37
python/paddle/tensor/random.py  +13 -13
python/paddle/tensor/search.py  +18 -17
python/paddle/tensor/stat.py  +4 -3
python/paddle/vision/ops.py  +7 -7
python/paddle/_C_ops.py (new file, mode 100644)
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle.fluid import core

__all__ = []

for name in dir(core.ops):
    globals()[name] = getattr(core.ops, name)
    __all__.append(name)
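With the module in place, callers import it once and each op resolves as a cached global. A rough micro-benchmark sketch of what the cache buys (our illustration, not part of the commit; it assumes a Paddle build of this era where both spellings still resolve, and reuses the `scale` argument convention visible at the call sites in this diff):

import timeit

import paddle
from paddle import _C_ops
from paddle.fluid import core

x = paddle.ones([4, 4])

# Per-call getattr on the pybind module vs. a cached module-global lookup.
t_core = timeit.timeit(
    lambda: core.ops.scale(x, 'scale', 2.0, 'bias', 0.0), number=10000)
t_cops = timeit.timeit(
    lambda: _C_ops.scale(x, 'scale', 2.0, 'bias', 0.0), number=10000)
print('core.ops: %.4fs  _C_ops: %.4fs' % (t_core, t_cops))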
python/paddle/distributed/collective.py

@@ -32,6 +32,7 @@ import paddle
 from .fleet import fleet
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle import _C_ops
 import paddle.fluid.dygraph_utils as dygraph_utils

 __all__ = []
...
@@ -191,7 +192,7 @@ def barrier(group=None):
     temp = fill_constant([1], dtype="int32", value="1")
     if in_dygraph_mode():
-        return core.ops.barrier(temp, temp, 'ring_id', ring_id)
+        return _C_ops.barrier(temp, temp, 'ring_id', ring_id)

     op_type = 'barrier'
...
@@ -318,7 +319,7 @@ def wait(tensor, group=None, use_calc_stream=True):
 def _sync_calc_stream(tensor):
     if in_dygraph_mode():
-        return core.ops.c_sync_calc_stream(tensor, tensor)
+        return _C_ops.c_sync_calc_stream(tensor, tensor)

     op_type = 'c_sync_calc_stream'
...
@@ -332,8 +333,7 @@ def _sync_calc_stream(tensor):
 def _sync_comm_stream(tensor, ring_id=0):
     if in_dygraph_mode():
-        return core.ops.c_sync_comm_stream([tensor], [tensor], 'ring_id',
-                                           ring_id)
+        return _C_ops.c_sync_comm_stream([tensor], [tensor], 'ring_id',
+                                         ring_id)

     op_type = 'c_sync_comm_stream'
...
@@ -391,9 +391,9 @@ def broadcast(tensor, src, group=None, use_calc_stream=True):
     assert gsrc >= 0, ("src rank out of group, need global rank")

     if in_dygraph_mode():
-        return core.ops.c_broadcast(tensor, tensor, 'root', gsrc,
-                                    'use_calc_stream', use_calc_stream,
-                                    'ring_id', ring_id)
+        return _C_ops.c_broadcast(tensor, tensor, 'root', gsrc,
+                                  'use_calc_stream', use_calc_stream,
+                                  'ring_id', ring_id)

     op_type = 'c_broadcast'
     check_variable_and_dtype(
...
@@ -453,17 +453,17 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
         if op == ReduceOp.SUM:
-            return core.ops.c_allreduce_sum_(tensor, 'use_calc_stream',
-                                             use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
+                                           use_calc_stream, 'ring_id', ring_id)
         elif op == ReduceOp.MAX:
-            return core.ops.c_allreduce_max_(tensor, 'use_calc_stream',
-                                             use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_max_(tensor, 'use_calc_stream',
+                                           use_calc_stream, 'ring_id', ring_id)
         elif op == ReduceOp.MIN:
-            return core.ops.c_allreduce_min_(tensor, 'use_calc_stream',
-                                             use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_min_(tensor, 'use_calc_stream',
+                                           use_calc_stream, 'ring_id', ring_id)
         elif op == ReduceOp.PROD:
-            return core.ops.c_allreduce_prod_(tensor, 'use_calc_stream',
-                                              use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_prod_(tensor, 'use_calc_stream',
+                                            use_calc_stream, 'ring_id', ring_id)
         else:
             raise ValueError("Unknown parameter: {}.".format(op))
...
@@ -539,21 +539,21 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, use_calc_stream=True):
     if in_dygraph_mode():
         if op == ReduceOp.SUM:
-            return core.ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
-                                         use_calc_stream, 'ring_id', ring_id,
-                                         'root_id', gdst)
+            return _C_ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
+                                       use_calc_stream, 'ring_id', ring_id,
+                                       'root_id', gdst)
         elif op == ReduceOp.MAX:
-            return core.ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
-                                         use_calc_stream, 'ring_id', ring_id,
-                                         'root_id', gdst)
+            return _C_ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
+                                       use_calc_stream, 'ring_id', ring_id,
+                                       'root_id', gdst)
         elif op == ReduceOp.MIN:
-            return core.ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
-                                         use_calc_stream, 'ring_id', ring_id,
-                                         'root_id', gdst)
+            return _C_ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
+                                       use_calc_stream, 'ring_id', ring_id,
+                                       'root_id', gdst)
         elif op == ReduceOp.PROD:
-            return core.ops.c_reduce_prod(tensor, tensor, 'use_calc_stream',
-                                          use_calc_stream, 'ring_id', ring_id,
-                                          'root_id', gdst)
+            return _C_ops.c_reduce_prod(tensor, tensor, 'use_calc_stream',
+                                        use_calc_stream, 'ring_id', ring_id,
+                                        'root_id', gdst)
         else:
             raise ValueError("Unknown parameter: {}.".format(op))
...
@@ -637,8 +637,8 @@ def all_gather(tensor_list, tensor, group=None, use_calc_stream=True):
     out = helper.create_variable_for_type_inference(dtype=tensor.dtype)
     if in_dygraph_mode():
-        core.ops.c_allgather(tensor, out, 'use_calc_stream', use_calc_stream,
-                             'ring_id', ring_id, 'nranks', nranks)
+        _C_ops.c_allgather(tensor, out, 'use_calc_stream', use_calc_stream,
+                           'ring_id', ring_id, 'nranks', nranks)
     else:
         if not isinstance(tensor_list, list):
             raise ValueError("The type of 'tensor_list' for all_gather "
...
@@ -725,9 +725,9 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
             tensor_list.append(tensor)
         temp = paddle.concat(tensor_list, axis=0)
     if in_dygraph_mode():
-        return core.ops.c_scatter(temp, tensor, 'use_calc_stream',
-                                  use_calc_stream, 'ring_id', ring_id, 'nranks',
-                                  nranks, 'root', gsrc)
+        return _C_ops.c_scatter(temp, tensor, 'use_calc_stream',
+                                use_calc_stream, 'ring_id', ring_id, 'nranks',
+                                nranks, 'root', gsrc)
     op_type = 'c_scatter'
     check_variable_and_dtype(
         tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
...
@@ -762,8 +762,8 @@ def _c_identity(tensor, group=None):
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.c_identity(tensor, 'use_calc_stream', True, 'ring_id',
-                                   ring_id, 'use_model_parallel', True)
+        return _C_ops.c_identity(tensor, 'use_calc_stream', True, 'ring_id',
+                                 ring_id, 'use_model_parallel', True)
     op_type = 'c_identity'
     helper = LayerHelper(op_type, **locals())
     out = helper.create_variable_for_type_inference(dtype=tensor.dtype)
...
@@ -805,9 +805,9 @@ def _c_concat(tensor, group=None):
     nranks = _get_global_env().world_size if group is None else group.nranks
     if in_dygraph_mode():
-        return core.ops.c_concat(tensor, 'ring_id', ring_id, 'use_calc_stream',
-                                 True, 'rank', rank, 'nranks', nranks,
-                                 'use_model_parallel', True)
+        return _C_ops.c_concat(tensor, 'ring_id', ring_id, 'use_calc_stream',
+                               True, 'rank', rank, 'nranks', nranks,
+                               'use_model_parallel', True)
     op_type = 'c_concat'
     helper = LayerHelper(op_type, **locals())
...
@@ -853,9 +853,9 @@ def _c_split(tensor, group=None):
     nranks = _get_global_env().world_size if group is None else group.nranks
     if in_dygraph_mode():
-        return core.ops.c_split(tensor, 'use_calc_stream', True, 'ring_id',
-                                ring_id, 'rank', rank, 'nranks', nranks,
-                                'use_model_parallel', True)
+        return _C_ops.c_split(tensor, 'use_calc_stream', True, 'ring_id',
+                              ring_id, 'rank', rank, 'nranks', nranks,
+                              'use_model_parallel', True)
     op_type = 'c_split'
     helper = LayerHelper(op_type, **locals())
...
@@ -892,7 +892,7 @@ def _mp_allreduce(tensor,
     if in_dygraph_mode():
         if op == ReduceOp.SUM:
-            return core.ops.c_allreduce_sum_(
+            return _C_ops.c_allreduce_sum_(
                 tensor, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id,
                 "use_model_parallel", use_model_parallel)
         else:
...
@@ -933,7 +933,7 @@ def _c_lookup_table(table, index, start_index=0, name=None):
         Tensor.
     """
     if in_dygraph_mode():
-        return core.ops.c_embedding(table, index, "start_index", start_index)
+        return _C_ops.c_embedding(table, index, "start_index", start_index)

     op_type = 'c_embedding'
     helper = LayerHelper(op_type, **locals())
...
@@ -1008,7 +1008,7 @@ def _c_softmax_with_cross_entropy(logits,
         label = paddle.unsqueeze(label, axis=-1)
     if in_dygraph_mode():
-        softmax, loss = core.ops.c_softmax_with_cross_entropy(
+        softmax, loss = _C_ops.c_softmax_with_cross_entropy(
             logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks)
         if not return_softmax:
             return loss
...
@@ -1043,8 +1043,8 @@ def _linear(x, weight, bias=None, name=None):
     """
     if in_dygraph_mode():
         pre_bias = _varbase_creator(dtype=x.dtype)
-        core.ops.matmul(x, weight, pre_bias, 'transpose_X', False,
-                        'transpose_Y', False, "alpha", 1)
+        _C_ops.matmul(x, weight, pre_bias, 'transpose_X', False,
+                      'transpose_Y', False, "alpha", 1)
         return dygraph_utils._append_bias_in_dygraph(
             pre_bias, bias, axis=len(x.shape) - 1)
     else:
...
@@ -1491,8 +1491,8 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id
     temp = paddle.concat(in_tensor_list, axis=0)
     if in_dygraph_mode():
-        core.ops.alltoall_(temp, 'use_calc_stream', use_calc_stream, 'ring_id',
-                           ring_id)
+        _C_ops.alltoall_(temp, 'use_calc_stream', use_calc_stream, 'ring_id',
+                         ring_id)
     else:
         op_type = 'alltoall'
         helper = LayerHelper(op_type, **locals())
...
@@ -1557,8 +1557,8 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
-                                'ring_id', ring_id, 'peer', dst)
+        return _C_ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
+                              'ring_id', ring_id, 'peer', dst)
     op_type = 'send_v2'
     check_variable_and_dtype(
         tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
...
@@ -1607,9 +1607,9 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
-        return core.ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
-                                'ring_id', ring_id, 'peer', src, 'dtype',
-                                tensor.dtype, 'out_shape', tensor.shape)
+        return _C_ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
+                              'ring_id', ring_id, 'peer', src, 'dtype',
+                              tensor.dtype, 'out_shape', tensor.shape)
     op_type = 'recv_v2'
     check_variable_and_dtype(
         tensor, 'tensor', ['float16', 'float32', 'float64', 'int32', 'int64'],
...
python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py

@@ -22,6 +22,7 @@ from paddle.fluid.framework import Variable
 import types
 from paddle.fluid import core
 import paddle
+from paddle import _C_ops

 __all__ = []
...
@@ -64,8 +65,8 @@ class HybridParallelGradScaler:
             param._grad_ivar() for param in optimizer._parameter_list
             if param._grad_ivar() is not None
         ]
-        core.ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
-                                          self._found_inf)
+        _C_ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
+                                        self._found_inf)
         # allreduce_max found_inf in check_group
         if not self._use_dp_mode:
             self._found_inf = paddle.cast(self._found_inf, dtype="int32")
...
python/paddle/distribution.py

@@ -34,6 +34,7 @@ import numpy as np
 import warnings
 from .fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
+from paddle import _C_ops

 __all__ = ['Distribution', 'Uniform', 'Normal', 'Categorical']
...
@@ -151,8 +152,8 @@ class Distribution(object):
                 warnings.warn(
                     "dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted."
                 )
-                return core.ops.cast(value, 'in_dtype', value.dtype,
-                                     'out_dtype', param.dtype)
+                return _C_ops.cast(value, 'in_dtype', value.dtype,
+                                   'out_dtype', param.dtype)
             return value

         check_variable_and_dtype(value, 'value', ['float32', 'float64'],
...
@@ -328,10 +329,10 @@ class Uniform(Distribution):
             lb_bool = self.low < value
             ub_bool = value < self.high
-            lb = core.ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
-                               value.dtype)
-            ub = core.ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
-                               value.dtype)
+            lb = _C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
+                             value.dtype)
+            ub = _C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
+                             value.dtype)
             return nn.log(lb * ub) - nn.log(self.high - self.low)

         name = self.name + '_log_prob'
...
@@ -357,10 +358,10 @@ class Uniform(Distribution):
             lb_bool = self.low < value
             ub_bool = value < self.high
-            lb = core.ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
-                               value.dtype)
-            ub = core.ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
-                               value.dtype)
+            lb = _C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
+                             value.dtype)
+            ub = _C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
+                             value.dtype)
             return (lb * ub) / (self.high - self.low)

         name = self.name + '_probs'
...
python/paddle/fluid/contrib/layers/nn.py

@@ -50,6 +50,7 @@ from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_
 from paddle.fluid.layers import slice, reshape
 import warnings
+from paddle import _C_ops

 __all__ = [
     'fused_elemwise_activation', 'sequence_topk_avg_pooling', 'var_conv_2d',
...
@@ -1540,7 +1541,7 @@ def bilateral_slice(x, guide, grid, has_offset, name=None):
     """
     if paddle.fluid.in_dygraph_mode():
         attrs = ('has_offset', has_offset)
-        return getattr(core.ops, "bilateral_slice")(x, grid, guide, *attrs)
+        return getattr(_C_ops, "bilateral_slice")(x, grid, guide, *attrs)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'bilateral_slice')
     check_variable_and_dtype(guide, 'guide', ['float32', 'float64'],
...
@@ -1616,7 +1617,7 @@ def correlation(x,
         attrs = ("pad_size", pad_size, "kernel_size", kernel_size,
                  "max_displacement", max_displacement, "stride1", stride1,
                  "stride2", stride2, "corr_type_multiply", corr_type_multiply)
-        output = getattr(core.ops, "correlation")(x, y, *attrs)
+        output = getattr(_C_ops, "correlation")(x, y, *attrs)
     else:
         helper = LayerHelper("correlation", **locals())
         output = helper.create_variable_for_type_inference(dtype=x.dtype)
...
python/paddle/fluid/contrib/optimizer.py

@@ -21,6 +21,7 @@ from paddle.fluid import unique_name
 from paddle.fluid import layers
 from paddle.fluid.layer_helper import LayerHelper
 import warnings
+from paddle import _C_ops

 __all__ = ['Momentum']
...
@@ -203,7 +204,7 @@ class Momentum(Optimizer):
         lr = self._create_param_lr(param_and_grad)

         if framework.in_dygraph_mode():
-            _, _ = core.ops.momentum(
+            _, _ = _C_ops.momentum(
                 param_and_grad[0], param_and_grad[1], velocity_acc, lr,
                 param_and_grad[0], velocity_acc, 'mu', self._momentum,
                 'use_nesterov', self._use_nesterov, 'regularization_method',
...
python/paddle/fluid/dygraph/amp/loss_scaler.py

@@ -20,6 +20,7 @@ from paddle.fluid.data_feeder import check_type
 from ...wrapped_decorator import signature_safe_contextmanager, wrap_decorator
 import warnings
 import numpy as np
+from paddle import _C_ops

 __all__ = ['AmpScaler']
...
@@ -215,8 +216,8 @@ class AmpScaler(object):
             param._grad_ivar() for param in optimizer._parameter_list
             if param._grad_ivar() is not None
         ]
-        core.ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
-                                          self._found_inf)
+        _C_ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
+                                        self._found_inf)

     def _update(self):
         """
...
python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py

@@ -25,6 +25,7 @@ from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_
 from paddle.fluid.layers.utils import flatten
 from paddle.fluid.layers.utils import pack_sequence_as
 import paddle.compat as cpt
+from paddle import _C_ops

 class NestSequence(object):
...
@@ -228,7 +229,7 @@ class PartialProgramLayer:
         attrs = ('global_block', self.program.desc.block(0), 'start_op_index',
                  0, 'end_op_index', self._infer_program.desc.block(0).op_size(),
                  'is_test', not self.training)
-        core.ops.run_program(
+        _C_ops.run_program(
             self._valid_vars(in_vars),
             self._valid_vars(self._params),
             self._valid_vars(out_vars), self._tmp_scope_vec, self._double_grads,
...
python/paddle/fluid/dygraph/inplace_utils.py

@@ -16,9 +16,10 @@ from ..wrapped_decorator import wrap_decorator
 from ..framework import in_dygraph_mode
 import warnings
 import paddle
+from paddle import _C_ops

-# NOTE(pangyoki): The Inplace APIs with underline(`_`) is only valid for the method of calling `core.ops`
+# NOTE(pangyoki): The Inplace APIs with underline(`_`) is only valid for the method of calling `_C_ops`
 # in dygraph mode. If static mode is used, the inplace mechanism will not be used, and the static method
 # of the original API will be called.
 def _inplace_apis_in_dygraph_only_(func):
...
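The NOTE above is the contract this module's decorator enforces: trailing-underscore inplace APIs dispatch through `_C_ops` only in dygraph mode, while static mode falls back to the out-of-place original. A minimal stand-alone sketch of that guard (function names and the list-based example are ours, purely illustrative, not Paddle's implementation):

import warnings

def dygraph_only_inplace(inplace_fn, fallback_fn, in_dygraph_mode):
    # Run the inplace kernel only in dygraph mode; otherwise warn and
    # call the out-of-place API, as the NOTE above describes.
    def wrapper(*args, **kwargs):
        if in_dygraph_mode():
            return inplace_fn(*args, **kwargs)
        warnings.warn('Inplace API is dygraph-only; calling the '
                      'out-of-place version instead.')
        return fallback_fn(*args, **kwargs)
    return wrapper

# Plain Python lists stand in for tensors here.
sort_ = dygraph_only_inplace(
    inplace_fn=lambda xs: (xs.sort(), xs)[1],  # mutates its argument
    fallback_fn=sorted,                        # returns a new list
    in_dygraph_mode=lambda: True)
print(sort_([3, 1, 2]))  # [1, 2, 3]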
python/paddle/fluid/dygraph/math_op_patch.py

@@ -21,6 +21,7 @@ from . import no_grad
 import numpy as np
 import warnings
+from paddle import _C_ops

 _supported_int_dtype_ = [
     core.VarDesc.VarType.UINT8,
...
@@ -67,8 +68,8 @@ def monkey_patch_math_varbase():
     @no_grad
     def create_tensor(value, dtype, shape):
         out = _varbase_creator(dtype=dtype)
-        out = core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
-                                     'value', value, 'force_cpu', False)
+        out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
+                                   'value', value, 'force_cpu', False)
         out.stop_gradient = True
         return out
...
@@ -100,10 +101,10 @@ def monkey_patch_math_varbase():
         """
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
-        return core.ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)
+        return _C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)

     def _scalar_elementwise_op_(var, scale, bias):
-        return core.ops.scale(var, 'scale', scale, 'bias', bias)
+        return _C_ops.scale(var, 'scale', scale, 'bias', bias)

     def _neg_(var):
         return _scalar_elementwise_op_(var, -1.0, 0.0)
...
@@ -242,7 +243,7 @@ def monkey_patch_math_varbase():
         # 4. calculation
         axis = -1
-        math_op = getattr(core.ops, op_type)
+        math_op = getattr(_C_ops, op_type)
         return math_op(self, other_var, 'axis', axis)

     comment = OpProtoHolder.instance().get_op_proto(op_type).comment
...
python/paddle/fluid/dygraph/nn.py

@@ -33,6 +33,7 @@ import numbers
 import logging
 import os
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops

 __all__ = [
     'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
...
@@ -236,7 +237,7 @@ class Conv2D(layers.Layer):
                      'dilations', self._dilation, 'groups', self._groups
                      if self._groups else 1, 'use_cudnn', self._use_cudnn,
                      'use_mkldnn', self._use_mkldnn)
-            out = core.ops.conv2d(input, self.weight, *attrs)
+            out = _C_ops.conv2d(input, self.weight, *attrs)
             pre_bias = out

             pre_act = dygraph_utils._append_bias_in_dygraph(
...
@@ -866,7 +867,7 @@ class Pool2D(layers.Layer):
                      'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
                      'use_mkldnn', self._use_mkldnn, 'exclusive',
                      self._exclusive, 'data_format', self._data_format)
-            return core.ops.pool2d(input, *attrs)
+            return _C_ops.pool2d(input, *attrs)

         check_variable_and_dtype(
             input, 'input', ['int8', 'uint8', 'float16', 'float32', 'float64'],
...
@@ -971,9 +972,9 @@ class Linear(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
             pre_bias = _varbase_creator(dtype=input.dtype)
-            core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
-                            'transpose_Y', False, "alpha", 1, "use_mkldnn",
-                            self._use_mkldnn)
+            _C_ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
+                          'transpose_Y', False, "alpha", 1, "use_mkldnn",
+                          self._use_mkldnn)
             pre_act = dygraph_utils._append_bias_in_dygraph(
                 pre_bias,
                 self.bias,
...
@@ -1116,8 +1117,8 @@ class InstanceNorm(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
-            out, _, _ = core.ops.instance_norm(input, self.scale, self.bias,
-                                               'epsilon', self._epsilon)
+            out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
+                                             'epsilon', self._epsilon)
             return out

         check_variable_and_dtype(input, 'input', ['float32', 'float64'],
...
@@ -1337,7 +1338,7 @@ class BatchNorm(layers.Layer):
                      "fuse_with_relu", self._fuse_with_relu, "use_global_stats",
                      self._use_global_stats, 'trainable_statistics',
                      self._trainable_statistics)
-            batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
+            batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
                 input, self.weight, self.bias, self._mean, self._variance,
                 mean_out, variance_out, *attrs)

             return dygraph_utils._append_activation_in_dygraph(
...
@@ -1488,7 +1489,7 @@ class Dropout(layers.Layer):
         if in_dygraph_mode():
             attrs = sum(attrs.items(), ())
-            out, mask = core.ops.dropout(input, *attrs)
+            out, mask = _C_ops.dropout(input, *attrs)
             return out

         out = self._helper.create_variable_for_type_inference(dtype=input.dtype)
...
@@ -1640,7 +1641,7 @@ class Embedding(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
-            return core.ops.lookup_table_v2(
+            return _C_ops.lookup_table_v2(
                 self.weight, input, 'is_sparse', self._is_sparse,
                 'is_distributed', self._is_distributed, 'remote_prefetch',
                 self._remote_prefetch, 'padding_idx', self._padding_idx)
...
@@ -1794,7 +1795,7 @@ class LayerNorm(layers.Layer):
                 1:] + ', but got input shape ' + str(input_shape))

         if in_dygraph_mode():
-            pre_act, _, _ = core.ops.layer_norm(
+            pre_act, _, _ = _C_ops.layer_norm(
                 input, self.weight, self.bias, 'epsilon', self._epsilon,
                 'begin_norm_axis', self._begin_norm_axis)
             return dygraph_utils._append_activation_in_dygraph(
...
@@ -1979,7 +1980,7 @@ class GRUUnit(layers.Layer):
     def forward(self, input, hidden):
         if in_dygraph_mode():
-            gate, reset_hidden_pre, updated_hidden = core.ops.gru_unit(
+            gate, reset_hidden_pre, updated_hidden = _C_ops.gru_unit(
                 input, hidden, self.weight, self.bias, 'activation',
                 self.activation, 'gate_activation', self.gate_activation)
             return updated_hidden, reset_hidden_pre, gate
...
@@ -2665,7 +2666,7 @@ class Conv2DTranspose(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
-            op = getattr(core.ops, self._op_type)
+            op = getattr(_C_ops, self._op_type)
             out = op(input, self.weight, 'output_size', self._output_size,
                      'strides', self._stride, 'paddings', self._padding,
                      'dilations', self._dilation, 'groups', self._groups,
...
python/paddle/fluid/dygraph_utils.py

@@ -14,6 +14,7 @@
 from . import core
 from .framework import dygraph_only
+from paddle import _C_ops

 @dygraph_only
...
@@ -40,7 +41,7 @@ def _append_activation_in_dygraph(input,
     if use_mkldnn:
         attrs += ('use_mkldnn', use_mkldnn)

-    act_op = getattr(core.ops, act)
+    act_op = getattr(_C_ops, act)
     return act_op(input, *attrs)
...
@@ -59,5 +60,5 @@ def _append_bias_in_dygraph(input, bias=None, axis=1, use_mkldnn=False):
     if bias is None:
         return input

-    return core.ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn',
-                                    use_mkldnn)
+    return _C_ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn',
+                                  use_mkldnn)
python/paddle/fluid/layers/control_flow.py

@@ -29,6 +29,7 @@ from functools import reduce, partial
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_
+from paddle import _C_ops

 __all__ = [
     'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than',
...
@@ -3805,7 +3806,7 @@ def is_empty(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.is_empty(x)
+        return _C_ops.is_empty(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'is_empty')
...
python/paddle/fluid/layers/detection.py

@@ -34,6 +34,7 @@ import numpy as np
 from functools import reduce
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from paddle.utils import deprecated
+from paddle import _C_ops

 __all__ = [
     'prior_box',
...
@@ -2990,7 +2991,7 @@ def generate_proposals(scores,
         assert return_rois_num, "return_rois_num should be True in dygraph mode."
         attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
                  'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta)
-        rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals(
+        rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals(
             scores, bbox_deltas, im_info, anchors, variances, *attrs)

         return rpn_rois, rpn_roi_probs, rpn_rois_num
...
@@ -3756,7 +3757,7 @@ def distribute_fpn_proposals(fpn_rois,
         assert rois_num is not None, "rois_num should not be None in dygraph mode."
         attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
                  refer_level, 'refer_scale', refer_scale)
-        multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
+        multi_rois, restore_ind, rois_num_per_level = _C_ops.distribute_fpn_proposals(
             fpn_rois, rois_num, num_lvl, num_lvl, *attrs)

         return multi_rois, restore_ind, rois_num_per_level
...
@@ -3952,7 +3953,7 @@ def collect_fpn_proposals(multi_rois,
     if in_dygraph_mode():
         assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
         attrs = ('post_nms_topN', post_nms_top_n)
-        output_rois, rois_num = core.ops.collect_fpn_proposals(
+        output_rois, rois_num = _C_ops.collect_fpn_proposals(
             input_rois, input_scores, rois_num_per_level, *attrs)

     check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
...
python/paddle/fluid/layers/layer_function_generator.py

@@ -23,6 +23,7 @@ from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, in_dygraph_mode
 from ..layer_helper import LayerHelper
 from ..data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = [
     'generate_layer_fn', 'generate_activation_fn', 'generate_inplace_fn',
...
@@ -257,7 +258,7 @@ def generate_activation_fn(op_type):
     def func(x, name=None):
         if in_dygraph_mode():
-            op = getattr(core.ops, op_type)
+            op = getattr(_C_ops, op_type)
             return op(x)

         if op_type not in ["abs", "exp", "square"]:
...
@@ -297,7 +298,7 @@ def generate_inplace_fn(inplace_op_type):
     def func(x, name=None):
         if in_dygraph_mode():
-            op = getattr(core.ops, inplace_op_type)
+            op = getattr(_C_ops, inplace_op_type)
             return op(x)
         warnings.warn(
             "In static mode, {}() is the same as {}() and does not perform inplace operation.".
...
python/paddle/fluid/layers/loss.py

@@ -27,6 +27,7 @@ from ..param_attr import ParamAttr
 from ..initializer import NumpyArrayInitializer, Constant
 from .. import core
 import warnings
+from paddle import _C_ops

 __all__ = [
     'center_loss',
...
@@ -261,8 +262,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
         return cross_entropy2(input, label, ignore_index)

     if in_dygraph_mode():
-        return core.ops.cross_entropy(input, label, "soft_label", soft_label,
-                                      "ignore_index", ignore_index)
+        return _C_ops.cross_entropy(input, label, "soft_label", soft_label,
+                                    "ignore_index", ignore_index)

     inputs = {'X': [input], 'Label': [label]}
     attrs = {"soft_label": soft_label, "ignore_index": ignore_index}
...
@@ -278,8 +279,8 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
 def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
     if in_dygraph_mode():
-        loss, _, _ = core.ops.cross_entropy2(input, label, 'ignore_index',
-                                             ignore_index)
+        loss, _, _ = _C_ops.cross_entropy2(input, label, 'ignore_index',
+                                           ignore_index)
         return loss

     inputs = {'X': [input], 'Label': [label]}
...
@@ -335,8 +336,8 @@ def square_error_cost(input, label):
     """
     if in_dygraph_mode():
-        minus_out = core.ops.elementwise_sub(input, label)
-        square_out = core.ops.square(minus_out)
+        minus_out = _C_ops.elementwise_sub(input, label)
+        square_out = _C_ops.square(minus_out)
         return square_out

     check_variable_and_dtype(input, "input", ['float32', 'float64'],
...
@@ -600,7 +601,7 @@ def warpctc(input,
             raise ValueError(
                 "input_length and label_length must not be None in dygraph mode!"
             )
-        grad, loss_out = core.ops.warpctc(
+        grad, loss_out = _C_ops.warpctc(
             input,
             label,
             input_length,
...
@@ -1260,12 +1261,12 @@ def softmax_with_cross_entropy(logits,
     """
     if in_dygraph_mode():
         if core.is_compiled_with_npu():
-            softmax, backprop, loss = core.ops.softmax_with_cross_entropy(
+            softmax, backprop, loss = _C_ops.softmax_with_cross_entropy(
                 logits, label, 'soft_label', soft_label, 'ignore_index',
                 ignore_index, 'numeric_stable_mode', numeric_stable_mode,
                 'axis', axis)
         else:
-            softmax, loss = core.ops.softmax_with_cross_entropy(
+            softmax, loss = _C_ops.softmax_with_cross_entropy(
                 logits, label, 'soft_label', soft_label, 'ignore_index',
                 ignore_index, 'numeric_stable_mode', numeric_stable_mode,
                 'axis', axis)
...
python/paddle/fluid/layers/metric_op.py

@@ -25,6 +25,7 @@ from .. import core
 from ..param_attr import ParamAttr
 from . import nn
 from ..data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = ['accuracy', 'auc']
...
@@ -84,8 +85,8 @@ def accuracy(input, label, k=1, correct=None, total=None):
             total = _varbase_creator(dtype="int32")

         topk_out, topk_indices = nn.topk(input, k=k)
-        _acc, _, _ = core.ops.accuracy(topk_out, topk_indices, label, correct,
-                                       total)
+        _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
+                                     total)
         return _acc

     helper = LayerHelper("accuracy", **locals())
...
python/paddle/fluid/layers/nn.py

@@ -39,6 +39,7 @@ from ...utils import deprecated
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 import paddle
 from paddle.utils import deprecated
+from paddle import _C_ops

 __all__ = [
     'fc',

@@ -201,7 +202,7 @@ def _elementwise_op_in_dygraph(x,
                                act=None,
                                use_mkldnn=False,
                                op_name=None):
-    op = getattr(core.ops, op_name)
+    op = getattr(_C_ops, op_name)
     out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)

     return dygraph_utils._append_activation_in_dygraph(

@@ -1029,7 +1030,7 @@ def dropout(x,
             seed = default_main_program().random_seed
         if is_test is None:
             is_test = not _dygraph_tracer()._train_mode
-        out, mask = core.ops.dropout(
+        out, mask = _C_ops.dropout(
             x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed',
             seed is not None, 'seed', seed if seed is not None else 0,
             'dropout_implementation', dropout_implementation)

@@ -1333,7 +1334,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
     """
     if in_dygraph_mode():
-        return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)
+        return _C_ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)

     inputs = {"X": [input]}
     attrs = {"axis": axis, "use_cudnn": use_cudnn}

@@ -4415,8 +4416,8 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
         reduce_all = True if dim == None or dim == [] or len(dim) == len(
             input.shape) else False
         dim = dim if dim != None and dim != [] else [0]
-        return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
-                                   'reduce_all', reduce_all)
+        return _C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
+                                 'reduce_all', reduce_all)
     attrs = {
         'dim': dim if dim != None and dim != [] else [0],
         'keep_dim': keep_dim,

@@ -4898,7 +4899,7 @@ def split(input, num_or_sections, dim=-1, name=None):
             raise TypeError(
                 "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                 "received %s." % (type(num_or_sections)))
-        return core.ops.split(input, num, *attrs)
+        return _C_ops.split(input, num, *attrs)

     check_variable_and_dtype(
         input, 'input',

@@ -5133,8 +5134,8 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     """
     if in_dygraph_mode():
         out = _varbase_creator(dtype=x.dtype)
-        core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
-                        transpose_y, 'alpha', float(alpha))
+        _C_ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
+                      transpose_y, 'alpha', float(alpha))
         return out

     def __check_input(x, y):

@@ -5265,7 +5266,7 @@ def topk(input, k, name=None):
     """
     if in_dygraph_mode():
         _k = k.numpy().item(0) if isinstance(k, Variable) else k
-        out, indices = core.ops.top_k(input, 'k', _k)
+        out, indices = _C_ops.top_k(input, 'k', _k)
         out.stop_gradient = True
         indices.stop_gradient = True
         return out, indices

@@ -5508,7 +5509,7 @@ def transpose(x, perm, name=None):
     """
     if in_dygraph_mode():
-        out, _ = core.ops.transpose2(x, 'axis', perm)
+        out, _ = _C_ops.transpose2(x, 'axis', perm)
         return out

     check_variable_and_dtype(

@@ -5790,7 +5791,7 @@ def multiplex(inputs, index, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.multiplex(index, inputs)
+        return _C_ops.multiplex(index, inputs)
     helper = LayerHelper('multiplex', **locals())

     check_type(inputs, 'inputs', (list), 'multiplex')

@@ -5976,8 +5977,8 @@ def one_hot(input, depth, allow_out_of_range=False):
             assert depth.shape == (
                 1, ), "depth of type Variable should have shape [1]"
             depth = depth.item(0)
-        out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
-                               allow_out_of_range)
+        out = _C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
+                             allow_out_of_range)
         out.stop_gradient = True
         return out

@@ -6158,10 +6159,10 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
                 item.numpy().item(0) if isinstance(item, Variable) else item
                 for item in shape
             ]
-            out, _ = core.ops.reshape2(x, None, 'shape', shape)
+            out, _ = _C_ops.reshape2(x, None, 'shape', shape)
         elif isinstance(shape, Variable):
             shape.stop_gradient = True
-            out, _ = core.ops.reshape2(x, shape)
+            out, _ = _C_ops.reshape2(x, shape)

         return dygraph_utils._append_activation_in_dygraph(out, act)

@@ -6282,7 +6283,7 @@ def squeeze(input, axes, name=None):
     """
     if in_dygraph_mode():
-        out, _ = core.ops.squeeze2(input, 'axes', axes)
+        out, _ = _C_ops.squeeze2(input, 'axes', axes)
         return out

     helper = LayerHelper("squeeze", **locals())

@@ -6342,7 +6343,7 @@ def unsqueeze(input, axes, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in axes
         ]
-        out, _ = core.ops.unsqueeze2(input, 'axes', axes)
+        out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
         return out

     check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')

@@ -6865,8 +6866,7 @@ def label_smooth(label,
         raise ValueError("The value of epsilon must be between 0 and 1.")

     if in_dygraph_mode():
-        return core.ops.label_smooth(label, prior_dist, 'epsilon',
-                                     float(epsilon))
+        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))

     check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                              'label_smooth')

@@ -6957,7 +6957,7 @@ def roi_pool(input,
     """
     if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
-        pool_out, argmaxes = core.ops.roi_pool(
+        pool_out, argmaxes = _C_ops.roi_pool(
             input, rois, rois_num, "pooled_height", pooled_height,
             "pooled_width", pooled_width, "spatial_scale", spatial_scale)
         return pool_out, argmaxes

@@ -7045,7 +7045,7 @@ def roi_align(input,
     """
     if in_dygraph_mode():
         assert rois_num is not None, "rois_num should not be None in dygraph mode."
-        align_out = core.ops.roi_align(
+        align_out = _C_ops.roi_align(
             input, rois, rois_num, "pooled_height", pooled_height,
             "pooled_width", pooled_width, "spatial_scale", spatial_scale,
             "sampling_ratio", sampling_ratio)

@@ -8314,7 +8314,7 @@ def gather(input, index, overwrite=True):
             output = fluid.layers.gather(x, index)
     """
     if in_dygraph_mode():
-        return core.ops.gather(input, index, None, 'overwrite', overwrite)
+        return _C_ops.gather(input, index, None, 'overwrite', overwrite)

     check_variable_and_dtype(
         input, 'x',

@@ -8405,7 +8405,7 @@ def gather_nd(input, index, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.gather_nd(input, index)
+        return _C_ops.gather_nd(input, index)
     check_variable_and_dtype(input, 'input',
                              ['bool', 'float32', 'float64', 'int32', 'int64'],
                              'gather_np')

@@ -8578,7 +8578,7 @@ def scatter_nd_add(ref, index, updates, name=None):
     """
     if in_dygraph_mode():
-        op = getattr(core.ops, 'scatter_nd_add')
+        op = getattr(_C_ops, 'scatter_nd_add')
         return op(ref, index, updates)

     if ref.dtype != updates.dtype:

@@ -8724,7 +8724,7 @@ def log(x, name=None):
         # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
     """
     if in_dygraph_mode():
-        return core.ops.log(x)
+        return _C_ops.log(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
     inputs = {'X': [x]}

@@ -8764,7 +8764,7 @@ def relu(x, name=None):
             # [1.  2.6]]
     """
     if in_dygraph_mode():
-        return core.ops.relu(x)
+        return _C_ops.relu(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')

@@ -8890,7 +8890,7 @@ def mean_iou(input, label, num_classes):
         mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes)
     """
     if in_dygraph_mode():
-        return core.ops.mean_iou(input, label, 'num_classes', num_classes)
+        return _C_ops.mean_iou(input, label, 'num_classes', num_classes)

     helper = LayerHelper('mean_iou', **locals())
     check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],

@@ -9390,8 +9390,8 @@ def pad2d(input,
     if in_dygraph_mode():
         _paddings = paddings.numpy().tolist() if isinstance(
             paddings, Variable) else paddings
-        return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
-                              'data_format', data_format, 'paddings', _paddings)
+        return _C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
+                            'data_format', data_format, 'paddings', _paddings)

     check_variable_and_dtype(
         input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],

@@ -9587,7 +9587,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
+        return _C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')

@@ -9629,7 +9629,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
             result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
     """
     if in_dygraph_mode():
-        return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
+        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hard_sigmoid')

@@ -9839,7 +9839,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
             #[ 1. 10.]]
     """
     if in_dygraph_mode():
-        return core.ops.brelu(x, 't_min', t_min, 't_max', t_max)
+        return _C_ops.brelu(x, 't_min', t_min, 't_max', t_max)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')

@@ -10098,7 +10098,7 @@ def stack(x, axis=0, name=None):
     axis = 0 if axis is None else axis
     if in_dygraph_mode():
-        return core.ops.stack(x, 'axis', axis)
+        return _C_ops.stack(x, 'axis', axis)

     if not isinstance(x, list) and not isinstance(x, tuple):
         # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.

@@ -10251,7 +10251,7 @@ def unstack(x, axis=0, num=None):
     if in_dygraph_mode():
         if num == None:
             num = x.shape[axis]
-        return core.ops.unstack(x, num, 'axis', int(axis), 'num', num)
+        return _C_ops.unstack(x, num, 'axis', int(axis), 'num', num)

     helper = LayerHelper('unstack', **locals())
     if num is None:

@@ -10347,7 +10347,7 @@ def expand(x, expand_times, name=None):
             expand_times_tensor = expand_times
             expand_times_tensor.stop_gradient = True
-        return core.ops.expand(x, expand_times_tensor, *attrs)
+        return _C_ops.expand(x, expand_times_tensor, *attrs)

     inputs = {"X": [x]}
     attrs = {}

@@ -10455,7 +10455,7 @@ def expand_as(x, target_tensor, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.expand_as(x, target_tensor)
+        return _C_ops.expand_as(x, target_tensor)

     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')

@@ -10671,10 +10671,9 @@ def gaussian_random(shape,
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.gaussian_random('shape', shape, 'mean',
-                                        float(mean), 'std',
-                                        float(std), 'seed', seed, 'dtype',
-                                        dtype)
+        return _C_ops.gaussian_random('shape', shape, 'mean',
+                                      float(mean), 'std',
+                                      float(std), 'seed', seed, 'dtype', dtype)

     check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn')
     check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn')

@@ -10979,8 +10978,8 @@ def slice(input, axes, starts, ends):
             ends_tensor.stop_gradient = True
             infer_flags = list(-1 for i in range(len(axes)))
-        return core.ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
-                              'infer_flags', infer_flags, *attrs)
+        return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
+                            'infer_flags', infer_flags, *attrs)

     if not isinstance(starts, (list, tuple, Variable)):
         raise ValueError(

@@ -11370,7 +11369,7 @@ def size(input):
     """
     if in_dygraph_mode():
-        return core.ops.size(input)
+        return _C_ops.size(input)
     check_variable_and_dtype(
         input, 'input',
         ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size")

@@ -11459,9 +11458,9 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     if in_dygraph_mode():
         _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        out = core.ops.scale(x, 'scale',
-                             float(_scale), 'bias',
-                             float(bias), 'bias_after_scale', bias_after_scale)
+        out = _C_ops.scale(x, 'scale',
+                           float(_scale), 'bias',
+                           float(bias), 'bias_after_scale', bias_after_scale)
         return dygraph_utils._append_activation_in_dygraph(out)

     check_variable_and_dtype(x, "x", [

@@ -12147,7 +12146,7 @@ Examples:
 def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     if in_dygraph_mode():
-        op = getattr(core.ops, op_name)
+        op = getattr(_C_ops, op_name)
         if binary_op:
             return op(x, y)
         else:

@@ -12404,7 +12403,7 @@ def clip_by_norm(x, max_norm, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.clip_by_norm(x, 'max_norm', max_norm)
+        return _C_ops.clip_by_norm(x, 'max_norm', max_norm)

     helper = LayerHelper("clip_by_norm", **locals())
     check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')

@@ -12449,7 +12448,7 @@ def mean(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.mean(x)
+        return _C_ops.mean(x)

     helper = LayerHelper("mean", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')

@@ -12530,8 +12529,8 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
-                            'y_num_col_dims', y_num_col_dims)
+        return _C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
+                          'y_num_col_dims', y_num_col_dims)

     inputs = {"X": [x], "Y": [y]}
     attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}

@@ -13156,8 +13155,7 @@ def add_position_encoding(input, alpha, beta, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.add_position_encoding(input, "alpha", alpha, "beta",
-                                              beta)
+        return _C_ops.add_position_encoding(input, "alpha", alpha, "beta", beta)

     helper = LayerHelper('add_position_encoding', **locals())
     check_variable_and_dtype(input, 'input', ['float32', 'float64'],

@@ -13411,8 +13409,8 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
         raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. "
                          "Received Attr(data_format): {}.".format(data_format))
     if in_dygraph_mode():
-        return core.ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
-                                       shift_ratio, 'data_format', data_format)
+        return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
+                                     shift_ratio, 'data_format', data_format)

     helper = LayerHelper("temporal_shift", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')

@@ -14107,7 +14105,7 @@ def where(condition):
     """
     if in_dygraph_mode():
-        return core.ops.where_index(condition)
+        return _C_ops.where_index(condition)

     helper = LayerHelper("where_index", **locals())

@@ -14890,8 +14888,8 @@ def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
         print(out)  # [[0.66666667, 1.66666667,3., 4.]]
     """
     if in_dygraph_mode():
-        return core.ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
-                                   'offset', offset)
+        return _C_ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
+                                 'offset', offset)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hard_swish')

@@ -15045,7 +15043,7 @@ def gather_tree(ids, parents):
     """
     if in_dygraph_mode():
-        return core.ops.gather_tree(ids, parents)
+        return _C_ops.gather_tree(ids, parents)
     else:
         helper = LayerHelper('gather_tree', **locals())
         check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')

@@ -15143,9 +15141,9 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.uniform_random('shape', shape, 'min',
-                                       float(min), 'max',
-                                       float(max), 'seed', seed, 'dtype', dtype)
+        return _C_ops.uniform_random('shape', shape, 'min',
+                                     float(min), 'max',
+                                     float(max), 'seed', seed, 'dtype', dtype)

     check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
     check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'),
...
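Across python/paddle/fluid/layers/nn.py the edit is purely mechanical, but it targets the hottest code in dygraph execution: each eager call used to resolve two chained attributes (core.ops, then the op) before reaching C++, while the cached module needs only one. A framework-free way to see the effect, using a stand-in namespace rather than Paddle itself; SimpleNamespace and whatever timings you observe are illustrative only:

    import timeit
    from types import SimpleNamespace

    # Stand-in for the real modules: core.ops holds a cheap callable.
    core = SimpleNamespace(ops=SimpleNamespace(relu=lambda x: x))
    # What the new module effectively provides: the same callable, one hop away.
    _C_ops = SimpleNamespace(relu=core.ops.relu)

    n = 1_000_000
    chained = timeit.timeit('core.ops.relu(1)', globals=globals(), number=n)
    cached = timeit.timeit('_C_ops.relu(1)', globals=globals(), number=n)
    print('core.ops.relu:', chained, ' _C_ops.relu:', cached)

The per-call saving is tiny, but these wrappers run once per operator per iteration in eager mode, so the lookups add up across a training loop.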
python/paddle/fluid/layers/tensor.py

@@ -32,6 +32,7 @@ from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, con
 from paddle.utils import deprecated
 from .utils import check_shape
+from paddle import _C_ops

 __all__ = [
     'create_tensor',

@@ -237,7 +238,7 @@ def cast(x, dtype):
     if in_dygraph_mode():
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
-        out = core.ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
+        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
         return out

     check_variable_and_dtype(x, 'x', [

@@ -313,7 +314,7 @@ def concat(input, axis=0, name=None):
         if isinstance(axis, Variable):
             axis = axis.numpy()
             axis = axis.item(0)
-        return core.ops.concat(input, 'axis', axis)
+        return _C_ops.concat(input, 'axis', axis)

     check_type(input, 'input', (list, tuple, Variable), 'concat')
     if not isinstance(input, Variable):

@@ -721,10 +722,10 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
         else:
             attrs['str_value'] = str(float(value.numpy().item(0)))
-        core.ops.fill_constant(out, 'value',
-                               float(value), 'force_cpu', force_cpu, 'dtype',
-                               out.dtype, 'str_value', attrs['str_value'],
-                               'shape', shape)
+        _C_ops.fill_constant(out, 'value',
+                             float(value), 'force_cpu', force_cpu, 'dtype',
+                             out.dtype, 'str_value', attrs['str_value'],
+                             'shape', shape)
         out.stop_gradient = True
         return out

@@ -1281,7 +1282,7 @@ def has_inf(x):
     """
     if in_dygraph_mode():
-        return core.ops.isinf(x)
+        return _C_ops.isinf(x)

     check_type(x, 'x', (Variable), 'has_inf')
     helper = LayerHelper("isinf", **locals())

@@ -1310,7 +1311,7 @@ def has_nan(x):
     """
     if in_dygraph_mode():
-        return core.ops.isnan(x)
+        return _C_ops.isnan(x)

     check_type(x, 'x', (Variable), 'has_nan')
     helper = LayerHelper("isnan", **locals())

@@ -1422,7 +1423,7 @@ def range(start, end, step, dtype, name=None):
         step = cast(step, dtype)

     if in_dygraph_mode():
-        return core.ops.range(start, end, step)
+        return _C_ops.range(start, end, step)

     out_shape = None
     if not isinstance(start, Variable) and not isinstance(

@@ -1491,8 +1492,8 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num)
     if in_dygraph_mode():
-        return core.ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
-                                 dtype)
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
+                               dtype)

     helper = LayerHelper("linspace", **locals())

@@ -1679,8 +1680,8 @@ def eye(num_rows,
         num_columns = num_rows
     if in_dygraph_mode():
-        out = core.ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
-                           num_columns)
+        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
+                         num_columns)
     else:
         helper = LayerHelper("eye", **locals())

@@ -1705,8 +1706,8 @@ def eye(num_rows,
         re_shape = re_shape + [num_rows, num_columns]
         expand_times = batch_shape + [1, 1]
         if in_dygraph_mode():
-            out = core.ops.reshape(out, 'shape', re_shape)
-            return core.ops.expand(out, None, 'expand_times', expand_times)
+            out = _C_ops.reshape(out, 'shape', re_shape)
+            return _C_ops.expand(out, None, 'expand_times', expand_times)

         if not isinstance(batch_shape, list):
             raise TypeError("batch_shape should be a list")
...
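The same two edits repeat across all 51 files in the commit: swap the call prefix and add the import. A change of that shape is usually applied by a script rather than by hand. A hypothetical rewrite pass along those lines; the file handling and import placement are illustrative, and it would not reproduce the hand-reflowed continuation lines seen in a few hunks (e.g. label_smooth), which would still need manual touch-up:

    import re
    import sys

    IMPORT_LINE = 'from paddle import _C_ops\n'

    def rewrite(path):
        with open(path) as f:
            src = f.read()
        if 'core.ops' not in src:
            return
        # Swap every call site; word boundaries keep names like score.ops intact.
        new = re.sub(r'\bcore\.ops\b', '_C_ops', src)
        if IMPORT_LINE not in new:
            lines = new.splitlines(keepends=True)
            # Place the import after the last existing top-level import.
            last = max((i for i, l in enumerate(lines)
                        if l.startswith(('import ', 'from '))), default=0)
            lines.insert(last + 1, IMPORT_LINE)
            new = ''.join(lines)
        with open(path, 'w') as f:
            f.write(new)

    if __name__ == '__main__':
        for p in sys.argv[1:]:
            rewrite(p)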
python/paddle/fluid/optimizer.py

@@ -43,6 +43,7 @@ from functools import cmp_to_key
 from .wrapped_decorator import signature_safe_contextmanager
 from .. import compat as cpt
 import warnings
+from paddle import _C_ops

 __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'Dpsgd', 'DecayedAdagrad',

@@ -915,7 +916,7 @@ class Optimizer(object):
         assert regularization_term is not None

         if framework.in_dygraph_mode():
-            return core.ops.sum([grad, regularization_term])
+            return _C_ops.sum([grad, regularization_term])

         new_grad = grad
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:

@@ -1295,8 +1296,8 @@ class SGDOptimizer(Optimizer):
     def _append_optimize_op(self, block, param_and_grad):
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
-            core.ops.sgd(param_and_grad[0], lr, param_and_grad[1],
-                         param_and_grad[0])
+            _C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
+                       param_and_grad[0])
             return None

         assert isinstance(block, framework.Block)

@@ -1420,10 +1421,10 @@ class MomentumOptimizer(Optimizer):
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
-            _, _ = core.ops.momentum(param_and_grad[0], param_and_grad[1],
-                                     velocity_acc, lr, param_and_grad[0],
-                                     velocity_acc, 'mu', self._momentum,
-                                     'use_nesterov', self._use_nesterov)
+            _, _ = _C_ops.momentum(param_and_grad[0], param_and_grad[1],
+                                   velocity_acc, lr, param_and_grad[0],
+                                   velocity_acc, 'mu', self._momentum,
+                                   'use_nesterov', self._use_nesterov)
             return None

         attrs = {"mu": self._momentum, "use_nesterov": self._use_nesterov}

@@ -2447,7 +2448,7 @@ class AdamOptimizer(Optimizer):
                 self._beta1, Variable) else self._beta1.numpy().item(0)
             _beta2 = self._beta2 if not isinstance(
                 self._beta2, Variable) else self._beta2.numpy().item(0)
-            _, _, _, _, _ = core.ops.adam(
+            _, _, _, _, _ = _C_ops.adam(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,

@@ -3510,7 +3511,7 @@ class LambOptimizer(AdamOptimizer):
         lr = self._create_param_lr(param_and_grad)

         if framework.in_dygraph_mode():
-            _, _, _, _, _ = core.ops.lamb(
+            _, _, _, _, _ = _C_ops.lamb(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'beta1', self._beta1,
...
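One detail worth noting in the optimizer hunks: these dygraph op calls are in place. The parameter, velocity, and moment tensors are passed both as inputs and as outputs (param_and_grad[0] appears on both sides of the argument list), so the Python-level return values are deliberately discarded with "_, _ =". The shape of that idiom in plain Python, with a made-up stand-in op rather than a Paddle API:

    def sgd_inplace(param, lr, grad, param_out):
        # Stand-in for an in-place kernel: writes the update into param_out
        # and also returns it; callers that pass the same buffer for input
        # and output can ignore the return value.
        for i, g in enumerate(grad):
            param_out[i] = param[i] - lr * g
        return param_out

    param = [1.0, 2.0]
    grad = [0.5, 0.5]
    # Same buffer as input and output, mirroring
    # _C_ops.sgd(param_and_grad[0], lr, param_and_grad[1], param_and_grad[0])
    sgd_inplace(param, 0.1, grad, param)
    print(param)  # [0.95, 1.95]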
python/paddle/fluid/regularizer.py

@@ -18,6 +18,7 @@ import logging
 from . import framework
 from .framework import in_dygraph_mode, _varbase_creator
 from . import core
+from paddle import _C_ops

 __all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']

@@ -133,7 +134,7 @@ class L2DecayRegularizer(WeightDecayRegularizer):
         assert isinstance(block, framework.Block)

         if framework.in_dygraph_mode():
-            return core.ops.scale(param, "scale", self._regularization_coeff)
+            return _C_ops.scale(param, "scale", self._regularization_coeff)
         else:
             decay = block.create_var(
                 dtype=param.dtype, shape=param.shape, lod_level=param.lod_level)
...
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py

@@ -23,7 +23,7 @@ import paddle.fluid.core as core
 import sys
 import warnings
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops

 LOWEST_WARNING_POSTION = 3
 ERROR_WARNING_POSTION = sys.maxsize
...
python/paddle/incubate/optimizer/modelaverage.py

@@ -20,6 +20,7 @@ import paddle
 import numpy as np
 from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
+from paddle import _C_ops

 __all__ = []

@@ -226,7 +227,7 @@ class ModelAverage(Optimizer):
                                              param_and_grad[0])
         num_updates = self._get_accumulator('num_updates', param_and_grad[0])
         if framework.in_dygraph_mode():
-            _, _, _, _, _, _ = core.ops.average_accumulates(
+            _, _, _, _, _, _ = _C_ops.average_accumulates(
                 param_and_grad[0], sum_1, sum_2, sum_3, num_accumulates,
                 old_num_accumulates, num_updates, sum_1, sum_2, sum_3,
                 num_accumulates, old_num_accumulates, num_updates,
...
python/paddle/metric/metrics.py

@@ -25,6 +25,7 @@ from ..fluid.layer_helper import LayerHelper
 from ..fluid.layers.nn import topk
 from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
 import paddle
+from paddle import _C_ops

 __all__ = []

@@ -798,8 +799,8 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
             total = _varbase_creator(dtype="int32")

         topk_out, topk_indices = topk(input, k=k)
-        _acc, _, _ = core.ops.accuracy(topk_out, topk_indices, label, correct,
-                                       total)
+        _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
+                                     total)
         return _acc

     helper = LayerHelper("accuracy", **locals())
...
python/paddle/nn/functional/activation.py

@@ -26,6 +26,7 @@ from ...fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_
 from ...fluid import core
 from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
 import paddle
+from paddle import _C_ops

 __all__ = []

@@ -60,7 +61,7 @@ def elu(x, alpha=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.elu(x, 'alpha', alpha)
+        return _C_ops.elu(x, 'alpha', alpha)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
     helper = LayerHelper("elu", **locals())

@@ -79,7 +80,7 @@ def elu_(x, alpha=1.0, name=None):
     Inplace version of ``elu`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_nn_cn_elu`.
     """
-    return core.ops.elu_(x, 'alpha', alpha)
+    return _C_ops.elu_(x, 'alpha', alpha)

 def gelu(x, approximate=False, name=None):

@@ -123,7 +124,7 @@ def gelu(x, approximate=False, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.gelu(x, 'approximate', approximate)
+        return _C_ops.gelu(x, 'approximate', approximate)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
     helper = LayerHelper("gelu", **locals())

@@ -171,7 +172,7 @@ def hardshrink(x, threshold=0.5, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.hard_shrink(x, 'threshold', threshold)
+        return _C_ops.hard_shrink(x, 'threshold', threshold)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardshrink')

@@ -219,7 +220,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.brelu(x, 't_min', min, 't_max', max)
+        return _C_ops.brelu(x, 't_min', min, 't_max', max)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardtanh')

@@ -274,7 +275,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
+        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardsigmoid')

@@ -328,7 +329,7 @@ def hardswish(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.hard_swish(x)
+        return _C_ops.hard_swish(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardswish')

@@ -373,7 +374,7 @@ def leaky_relu(x, negative_slope=0.01, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.leaky_relu(x, 'alpha', negative_slope)
+        return _C_ops.leaky_relu(x, 'alpha', negative_slope)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'leaky_relu')

@@ -447,7 +448,7 @@ def prelu(x, weight, name=None):
             mode = 'channel'

     if in_dygraph_mode():
-        return core.ops.prelu(x, weight, 'mode', mode)
+        return _C_ops.prelu(x, weight, 'mode', mode)

     helper = LayerHelper('prelu', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)

@@ -488,7 +489,7 @@ def relu(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.relu(x)
+        return _C_ops.relu(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
     helper = LayerHelper('relu', **locals())

@@ -503,7 +504,7 @@ def relu_(x, name=None):
     Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_nn_cn_relu`.
     """
-    return core.ops.relu_(x)
+    return _C_ops.relu_(x)

 def log_sigmoid(x, name=None):

@@ -533,7 +534,7 @@ def log_sigmoid(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.logsigmoid(x)
+        return _C_ops.logsigmoid(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'log_sigmoid')

@@ -597,7 +598,7 @@ def maxout(x, groups, axis=1, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.maxout(x, 'groups', groups, 'axis', axis)
+        return _C_ops.maxout(x, 'groups', groups, 'axis', axis)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
     if axis not in [1, -1, 3]:

@@ -646,7 +647,7 @@ def relu6(x, name=None):
     """
     threshold = 6.0
     if in_dygraph_mode():
-        return core.ops.relu6(x, 'threshold', threshold)
+        return _C_ops.relu6(x, 'threshold', threshold)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
     helper = LayerHelper('relu6', **locals())

@@ -703,7 +704,7 @@ def selu(x,
             "The alpha must be no less than zero. Received: {}.".format(alpha))

     if in_dygraph_mode():
-        return core.ops.selu(x, 'scale', scale, 'alpha', alpha)
+        return _C_ops.selu(x, 'scale', scale, 'alpha', alpha)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
     helper = LayerHelper('selu', **locals())

@@ -741,7 +742,7 @@ def silu(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.silu(x)
+        return _C_ops.silu(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
     helper = LayerHelper("silu", **locals())

@@ -872,8 +873,8 @@ def softmax(x, axis=-1, dtype=None, name=None):
     if in_dygraph_mode():
         outs_cast = x if dtype is None \
-            else core.ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return core.ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', use_cudnn)
+            else _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
+        return _C_ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', use_cudnn)

     if dtype is None:
         check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],

@@ -913,7 +914,7 @@ def softmax_(x, axis=-1, dtype=None, name=None):
     if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
         dtype = convert_np_dtype_to_dtype_(dtype)
     use_cudnn = True
-    return core.ops.softmax_(x, 'axis', axis, 'use_cudnn', use_cudnn)
+    return _C_ops.softmax_(x, 'axis', axis, 'use_cudnn', use_cudnn)

 def softplus(x, beta=1, threshold=20, name=None):

@@ -946,7 +947,7 @@ def softplus(x, beta=1, threshold=20, name=None):
             out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
     """
     if in_dygraph_mode():
-        return core.ops.softplus(x, 'beta', beta, 'threshold', threshold)
+        return _C_ops.softplus(x, 'beta', beta, 'threshold', threshold)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'softplus')

@@ -998,7 +999,7 @@ def softshrink(x, threshold=0.5, name=None):
                 threshold))

     if in_dygraph_mode():
-        return core.ops.softshrink(x, 'lambda', threshold)
+        return _C_ops.softshrink(x, 'lambda', threshold)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'softshrink')

@@ -1039,7 +1040,7 @@ def softsign(x, name=None):
             out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
     """
     if in_dygraph_mode():
-        return core.ops.softsign(x)
+        return _C_ops.softsign(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'softsign')

@@ -1077,7 +1078,7 @@ def swish(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.swish(x, 'beta', 1.0)
+        return _C_ops.swish(x, 'beta', 1.0)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
     helper = LayerHelper('swish', **locals())

@@ -1117,7 +1118,7 @@ def tanhshrink(x, name=None):
             out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
     """
     if in_dygraph_mode():
-        return core.ops.tanh_shrink(x)
+        return _C_ops.tanh_shrink(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'tanhshrink')

@@ -1159,7 +1160,7 @@ def thresholded_relu(x, threshold=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.thresholded_relu(x, 'threshold', threshold)
+        return _C_ops.thresholded_relu(x, 'threshold', threshold)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'thresholded_relu')

@@ -1234,8 +1235,8 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
     if in_dygraph_mode():
         if dtype is not None:
-            x = core.ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return core.ops.log_softmax(x, 'axis', axis)
+            x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
+        return _C_ops.log_softmax(x, 'axis', axis)

     if dtype is None:
         check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
...
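Every function in python/paddle/nn/functional/activation.py follows the same two-path template that these hunks modify: a dygraph fast path returning the cached op directly, then the static-graph path that validates dtypes and appends an operator to the current block. Condensed, using thresholded_relu as the example; the static-graph half below is a sketch assembled from the pieces visible in this diff (check_variable_and_dtype, LayerHelper, create_variable_for_type_inference, append_op), not copied from the file:

    from paddle import _C_ops
    from paddle.fluid.framework import in_dygraph_mode
    from paddle.fluid.layer_helper import LayerHelper
    from paddle.fluid.data_feeder import check_variable_and_dtype

    def thresholded_relu(x, threshold=1.0, name=None):
        # Fast path: eager mode calls straight into the cached C++ op.
        if in_dygraph_mode():
            return _C_ops.thresholded_relu(x, 'threshold', threshold)
        # Slow path: static graph; validate, then record the op in the program.
        check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                                 'thresholded_relu')
        helper = LayerHelper('thresholded_relu', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(
            type='thresholded_relu',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'threshold': threshold})
        return out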
python/paddle/nn/functional/common.py

@@ -33,6 +33,7 @@ from ...fluid.framework import in_dygraph_mode
 from ...fluid import core, dygraph_utils
 from ...fluid import core, layers
 from ...fluid.data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = []

@@ -452,15 +453,15 @@ def interpolate(x,
         dy_attr = tuple(attr_list)

         if resample_type == "linear":
-            out = core.ops.linear_interp_v2(x, *dy_attr)
+            out = _C_ops.linear_interp_v2(x, *dy_attr)
         elif resample_type == "bilinear":
-            out = core.ops.bilinear_interp_v2(x, *dy_attr)
+            out = _C_ops.bilinear_interp_v2(x, *dy_attr)
         elif resample_type == "trilinear":
-            out = core.ops.trilinear_interp_v2(x, *dy_attr)
+            out = _C_ops.trilinear_interp_v2(x, *dy_attr)
         elif resample_type == "nearest":
-            out = core.ops.nearest_interp_v2(x, *dy_attr)
+            out = _C_ops.nearest_interp_v2(x, *dy_attr)
         elif resample_type == "bicubic":
-            out = core.ops.bicubic_interp_v2(x, *dy_attr)
+            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
         return out
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(

@@ -710,7 +711,7 @@ def bilinear(x1, x2, weight, bias=None, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.bilinear_tensor_product(x1, x2, weight, bias)
+        return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)

     check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
     check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')

@@ -884,7 +885,7 @@ def dropout(x,
     if in_dygraph_mode():
         if default_main_program().random_seed != 0:
             seed = default_main_program().random_seed
-        out, mask = core.ops.dropout(
+        out, mask = _C_ops.dropout(
             x, 'dropout_prob', p, 'is_test', not training, 'fix_seed',
             seed is not None, 'seed', seed
             if seed is not None else 0, 'dropout_implementation', mode)

@@ -1316,8 +1317,8 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
     if in_dygraph_mode():
         if isinstance(pad, Variable):
             pad = pad.numpy()
-        out = core.ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
-                             "data_format", data_format, "name", name)
+        out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
+                           "data_format", data_format, "name", name)
     else:
         attrs = {'mode': mode, 'value': value, 'data_format': data_format}
         inputs = {'X': [x]}

@@ -1447,13 +1448,13 @@ def linear(x, weight, bias=None, name=None):
     """
     if in_dygraph_mode():
         pre_bias = _varbase_creator(dtype=x.dtype)
-        core.ops.matmul(x, weight, pre_bias, 'transpose_X', False,
-                        'transpose_Y', False, "alpha", 1)
+        _C_ops.matmul(x, weight, pre_bias, 'transpose_X', False,
+                      'transpose_Y', False, "alpha", 1)

         if bias is None:
             return pre_bias

-        return core.ops.elementwise_add(pre_bias, bias)
+        return _C_ops.elementwise_add(pre_bias, bias)
     else:
         helper = LayerHelper('linear', **locals())
         dtype = x.dtype

@@ -1546,8 +1547,7 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
         raise ValueError("The value of epsilon must be between 0 and 1.")

     if in_dygraph_mode():
-        return core.ops.label_smooth(label, prior_dist, 'epsilon',
-                                     float(epsilon))
+        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))

     check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                              'label_smooth')
...
python/paddle/nn/functional/conv.py

@@ -22,6 +22,7 @@ from ...fluid.layers import nn, utils
 from ...fluid.data_feeder import check_variable_and_dtype
 from ...fluid.param_attr import ParamAttr
 from ...fluid.layer_helper import LayerHelper
+from paddle import _C_ops

 __all__ = []

@@ -115,7 +116,7 @@ def _conv_nd(x,
                  use_mkldnn, 'fuse_relu_before_depthwise_conv', False,
                  "padding_algorithm", padding_algorithm, "data_format",
                  data_format)
-        pre_bias = getattr(core.ops, op_type)(x, weight, *attrs)
+        pre_bias = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(pre_bias, bias, axis=channel_dim)
         else:

@@ -339,7 +340,7 @@ def conv1d(x,
                  'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn', False,
                  'fuse_relu_before_depthwise_conv', False, "padding_algorithm",
                  padding_algorithm, "data_format", conv2d_data_format)
-        out = getattr(core.ops, l_type)(x, weight, *attrs)
+        out = getattr(_C_ops, l_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(out, bias, axis=channel_dim)
     else:

@@ -775,7 +776,7 @@ def conv1d_transpose(x,
                  'strides', stride, 'paddings', padding, 'padding_algorithm',
                  padding_algorithm, 'dilations', dilation, 'groups', groups,
                  'use_cudnn', use_cudnn, 'data_format', conv2d_data_format)
-        out = getattr(core.ops, op_type)(x, weight, *attrs)
+        out = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(out, bias, axis=channel_dim)
     else:

@@ -1010,7 +1011,7 @@ def conv2d_transpose(x,
                  'strides', stride, 'paddings', padding, 'padding_algorithm',
                  padding_algorithm, 'dilations', dilation, 'groups', groups,
                  'use_cudnn', use_cudnn, 'data_format', data_format)
-        pre_bias = getattr(core.ops, op_type)(x, weight, *attrs)
+        pre_bias = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(pre_bias, bias, axis=channel_dim)
         else:

@@ -1402,7 +1403,7 @@ def conv3d_transpose(x,
                  'paddings', padding, "padding_algorithm",
"padding_algorithm"
,
padding_algorithm
,
'strides'
,
stride
,
'dilations'
,
dilation
,
'groups'
,
groups
,
'strides'
,
stride
,
'dilations'
,
dilation
,
'groups'
,
groups
,
'use_cudnn'
,
use_cudnn
,
"data_format"
,
data_format_
)
'use_cudnn'
,
use_cudnn
,
"data_format"
,
data_format_
)
pre_bias
=
getattr
(
core
.
ops
,
op_type
)(
x
,
weight
,
*
attrs
)
pre_bias
=
getattr
(
_C_
ops
,
op_type
)(
x
,
weight
,
*
attrs
)
if
bias
is
not
None
:
if
bias
is
not
None
:
out
=
nn
.
elementwise_add
(
pre_bias
,
bias
,
axis
=
channel_dim
)
out
=
nn
.
elementwise_add
(
pre_bias
,
bias
,
axis
=
channel_dim
)
else
:
else
:
...
...
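The conv hunks also show the dynamic-dispatch variant of the pattern: the op name (`op_type` or `l_type`) is chosen at runtime, so the call site indexes the module with `getattr(_C_ops, op_type)` instead of a fixed attribute. End-user code is unaffected; a quick dygraph check, assuming a Paddle 2.x build around this commit:

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([1, 3, 8, 8])   # NCHW input
    w = paddle.randn([6, 3, 3, 3])   # 6 filters, 3x3, 3 input channels
    y = F.conv2d(x, w, padding=1)    # dygraph now routes through getattr(_C_ops, ...)
    print(y.shape)                   # [1, 6, 8, 8]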
python/paddle/nn/functional/input.py View file @ f05098b5
...
@@ -18,6 +18,7 @@ from ...fluid.framework import Variable, in_dygraph_mode
 from ...fluid.layer_helper import LayerHelper
 from ...fluid.layers import core
 from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
+from paddle import _C_ops

 __all__ = []
...
@@ -86,8 +87,8 @@ def one_hot(x, num_classes, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.one_hot_v2(x, 'depth', num_classes,
-                                   'allow_out_of_range', False)
+        return _C_ops.one_hot_v2(x, 'depth', num_classes,
+                                 'allow_out_of_range', False)
     else:
         check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
         helper = LayerHelper("one_hot_v2", **locals())
...
@@ -195,7 +196,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
             weight.shape[0], weight.shape[0]))
     if in_dygraph_mode():
-        return core.ops.lookup_table_v2(
+        return _C_ops.lookup_table_v2(
             weight, x, 'is_sparse', sparse, 'is_distributed', False,
             'remote_prefetch', False, 'padding_idx', padding_idx)
     else:
...
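The behavior of `one_hot` and `embedding` is unchanged; only the dispatch target moves. For example:

    import paddle
    import paddle.nn.functional as F

    ids = paddle.to_tensor([0, 2, 1])
    print(F.one_hot(ids, num_classes=3).numpy())
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]]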
python/paddle/nn/functional/loss.py View file @ f05098b5
...
@@ -38,6 +38,7 @@ from ...fluid.framework import in_dygraph_mode
 from ...fluid.framework import _varbase_creator
 from ...fluid.framework import Variable
 from paddle.utils import deprecated
+from paddle import _C_ops

 __all__ = []
...
@@ -115,15 +116,15 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
             reduction)

     if in_dygraph_mode():
-        out = core.ops.bce_loss(input, label)
+        out = _C_ops.bce_loss(input, label)
         if weight is not None:
-            out = core.ops.elementwise_mul(out, weight, 'axis', -1)
+            out = _C_ops.elementwise_mul(out, weight, 'axis', -1)
         if reduction == 'sum':
-            return core.ops.reduce_sum(out, 'dim', [0], 'keep_dim', False,
-                                       "reduce_all", True)
+            return _C_ops.reduce_sum(out, 'dim', [0], 'keep_dim', False,
+                                     "reduce_all", True)
         elif reduction == 'mean':
-            return core.ops.mean(out)
+            return _C_ops.mean(out)
         else:
             return out
...
@@ -250,22 +251,23 @@ def binary_cross_entropy_with_logits(logit,
     if in_dygraph_mode():
         one = _varbase_creator(dtype=logit.dtype)
-        core.ops.fill_constant(one, 'value',
-                               float(1.0), 'force_cpu', False, 'dtype',
-                               one.dtype, 'str_value', '1.0', 'shape', [1])
-        out = core.ops.sigmoid_cross_entropy_with_logits(logit, label)
+        _C_ops.fill_constant(one, 'value',
+                             float(1.0), 'force_cpu', False, 'dtype',
+                             one.dtype, 'str_value', '1.0', 'shape', [1])
+        out = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
         if pos_weight is not None:
-            log_weight = core.ops.elementwise_add(
-                core.ops.elementwise_mul(
-                    label, core.ops.elementwise_sub(pos_weight, one)), one)
-            out = core.ops.elementwise_mul(out, log_weight)
+            log_weight = _C_ops.elementwise_add(
+                _C_ops.elementwise_mul(
+                    label, _C_ops.elementwise_sub(pos_weight, one)),
+                one)
+            out = _C_ops.elementwise_mul(out, log_weight)
         if weight is not None:
-            out = core.ops.elementwise_mul(out, weight)
+            out = _C_ops.elementwise_mul(out, weight)

         if reduction == "sum":
-            return core.ops.reduce_sum(out, 'reduce_all', True)
+            return _C_ops.reduce_sum(out, 'reduce_all', True)
         elif reduction == "mean":
-            return core.ops.mean(out)
+            return _C_ops.mean(out)
         else:
             return out
...
@@ -393,7 +395,7 @@ def hsigmoid_loss(input,
     """
     if in_dygraph_mode():
-        out, _, _ = core.ops.hierarchical_sigmoid(
+        out, _, _ = _C_ops.hierarchical_sigmoid(
             input, weight, label, path_table, path_code, bias, 'num_classes',
             num_classes, 'is_sparse', is_sparse, 'remote_prefetch', is_sparse)
         return out
...
@@ -570,16 +572,16 @@ def margin_ranking_loss(input,
             "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but "
             "received %s, which is not allowed." % reduction)
     if fluid.framework.in_dygraph_mode():
-        out = core.ops.elementwise_sub(other, input)
-        out = core.ops.elementwise_mul(out, label)
+        out = _C_ops.elementwise_sub(other, input)
+        out = _C_ops.elementwise_mul(out, label)
         if margin != 0.0:
             margin = fluid.dygraph.base.to_variable([margin], dtype=out.dtype)
-            out = core.ops.elementwise_add(out, margin)
-        out = core.ops.relu(out)
+            out = _C_ops.elementwise_add(out, margin)
+        out = _C_ops.relu(out)
         if reduction == 'sum':
-            return core.ops.reduce_sum(out, 'reduce_all', True)
+            return _C_ops.reduce_sum(out, 'reduce_all', True)
         elif reduction == 'mean':
-            return core.ops.mean(out)
+            return _C_ops.mean(out)
         return out

     helper = LayerHelper("margin_ranking_loss", **locals())
...
@@ -690,10 +692,10 @@ def l1_loss(input, label, reduction='mean', name=None):
         unreduced = _elementwise_op_in_dygraph(
             input, label, axis=-1, act='abs', op_name='elementwise_sub')
         if reduction == 'mean':
-            return core.ops.mean(unreduced)
+            return _C_ops.mean(unreduced)
         elif reduction == 'sum':
-            return core.ops.reduce_sum(unreduced, 'dim', [0], 'keep_dim', False,
-                                       'reduce_all', True)
+            return _C_ops.reduce_sum(unreduced, 'dim', [0], 'keep_dim', False,
+                                     'reduce_all', True)
         else:
             return unreduced
...
@@ -778,14 +780,14 @@ def nll_loss(input,
         c = input_shape[1]
     if in_dygraph_mode():
         if input_dims != 2 and input_dims != 4:
-            input, _ = core.ops.reshape2(input, None, 'shape', [n, c, 1, -1])
-            label, _ = core.ops.reshape2(label, None, 'shape', [n, 1, -1])
+            input, _ = _C_ops.reshape2(input, None, 'shape', [n, c, 1, -1])
+            label, _ = _C_ops.reshape2(label, None, 'shape', [n, 1, -1])
             out_shape = [n] + input_shape[2:]
-        out, total_weight = core.ops.nll_loss(input, label, weight,
-                                              'ignore_index', ignore_index,
-                                              'reduction', reduction)
+        out, total_weight = _C_ops.nll_loss(input, label, weight,
+                                            'ignore_index', ignore_index,
+                                            'reduction', reduction)
         if input_dims != 2 and input_dims != 4 and reduction == 'none':
-            out, _ = core.ops.reshape2(out, None, 'shape', out_shape)
+            out, _ = _C_ops.reshape2(out, None, 'shape', out_shape)
         return out

     helper = LayerHelper('nll_loss', **locals())
...
@@ -903,7 +905,7 @@ def kl_div(input, label, reduction='mean', name=None):
         label = fluid.layers.cast(label, 'float64')

     if paddle.in_dynamic_mode():
-        out = core.ops.kldiv_loss(input, label, 'reduction', reduction)
+        out = _C_ops.kldiv_loss(input, label, 'reduction', reduction)
         return out

     helper = LayerHelper('kl_div', **locals())
...
@@ -1386,7 +1388,7 @@ def cross_entropy(input,
         if input_dims - 1 == label_dims:
             label = paddle.unsqueeze(label, axis=axis)
     if in_dygraph_mode():
-        _, out = core.ops.softmax_with_cross_entropy(
+        _, out = _C_ops.softmax_with_cross_entropy(
             input, label, 'soft_label', soft_label, 'ignore_index',
             ignore_index, 'numeric_stable_mode', True, 'axis', axis,
             'use_softmax', use_softmax)
...
@@ -1408,7 +1410,7 @@ def cross_entropy(input,
                 weight_gather_reshape = reshape(weight_gather, shape=out_shape)
                 out = paddle.cast(out, weight_gather_reshape.dtype)
-                out = core.ops.elementwise_mul(out, weight_gather_reshape)
+                out = _C_ops.elementwise_mul(out, weight_gather_reshape)
             else:
                 label_min = paddle.min(label)
...
@@ -1418,18 +1420,18 @@ def cross_entropy(input,
                         'Expected 0 <= label_value < class_dimension({}), but got {} <= label_value <= {} '.
                         format(input.shape[-1],
                                label_min.numpy(), label_max.numpy()))
-                weight_gather = core.ops.gather_nd(weight, label)
+                weight_gather = _C_ops.gather_nd(weight, label)
                 input_shape = list(label.shape)
                 weight_gather_reshape = reshape(
                     weight_gather, shape=input_shape)
                 out = paddle.cast(out, weight_gather_reshape.dtype)
-                out = core.ops.elementwise_mul(out, weight_gather_reshape)
+                out = _C_ops.elementwise_mul(out, weight_gather_reshape)

         if reduction == "sum":
             # because of fluid_softmax_with_cross_entropy op's inner logic,
             # in the out tensor of this op, the loss of sample with class_index==ignore_index is 0
             # so, reduce_sum all directly is ok
-            return core.ops.reduce_sum(out, 'reduce_all', True)
+            return _C_ops.reduce_sum(out, 'reduce_all', True)
         elif reduction == "mean":
             #1. if weight==none,
             #    numerator: reduce_sum all loss directly is ok causeof fluid_softmax_with_cross_entropy's inner logic
...
@@ -1438,30 +1440,30 @@ def cross_entropy(input,
             #    numerator: loss's weighted sum
             #    denominator: cal the sum of weight where the sample's class_index!=ignore_index
             if ignore_index != -100:
-                out_sum = core.ops.reduce_sum(out, 'reduce_all', True)
+                out_sum = _C_ops.reduce_sum(out, 'reduce_all', True)
                 #for each label[i],set 1 or 0, according to ignore_index
                 #mask[i]=0, if label[i]==ignore_index
                 #mask[i]=1, otherwise
                 mask = (label != ignore_index)
                 if weight is None:
                     mask = paddle.cast(mask, dtype=out_sum.dtype)
-                    count = core.ops.reduce_sum(mask, 'reduce_all', True)
+                    count = _C_ops.reduce_sum(mask, 'reduce_all', True)
                     ret = out_sum / (count + (count == 0.0))
                 else:
                     mask = paddle.cast(mask, weight_gather_reshape.dtype)
-                    weight_ignored = core.ops.elementwise_mul(
+                    weight_ignored = _C_ops.elementwise_mul(
                         mask, weight_gather_reshape)
-                    weight_sum = core.ops.reduce_sum(weight_ignored,
-                                                     'reduce_all', True)
+                    weight_sum = _C_ops.reduce_sum(weight_ignored,
+                                                   'reduce_all', True)
                     ret = out_sum / (weight_sum + (weight_sum == 0.0))
                 return ret
             elif weight is not None:
-                out_sum = core.ops.reduce_sum(out, 'reduce_all', True)
-                total_weight = core.ops.reduce_sum(weight_gather_reshape,
-                                                   'reduce_all', True)
+                out_sum = _C_ops.reduce_sum(out, 'reduce_all', True)
+                total_weight = _C_ops.reduce_sum(weight_gather_reshape,
+                                                 'reduce_all', True)
                 return out_sum / (total_weight + (total_weight == 0.0))
             else:
-                return core.ops.mean(out)
+                return _C_ops.mean(out)
         else:
             if input_dims - 1 == label_dims:
...
@@ -1645,38 +1647,37 @@ def sigmoid_focal_loss(logit,
     if in_dygraph_mode():
         one = _varbase_creator(dtype=logit.dtype)
-        core.ops.fill_constant(one, 'value',
-                               float(1.0), 'force_cpu', False, 'dtype',
-                               one.dtype, 'str_value', '1.0', 'shape',
-                               logit.shape)
-        loss = core.ops.sigmoid_cross_entropy_with_logits(logit, label)
-        pred = core.ops.sigmoid(logit)
-        p_t = core.ops.elementwise_add(
-            core.ops.elementwise_mul(pred, label),
-            core.ops.elementwise_mul(
-                core.ops.elementwise_sub(one, pred),
-                core.ops.elementwise_sub(one, label)))
+        _C_ops.fill_constant(one, 'value',
+                             float(1.0), 'force_cpu', False, 'dtype',
+                             one.dtype, 'str_value', '1.0', 'shape',
+                             logit.shape)
+        loss = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
+        pred = _C_ops.sigmoid(logit)
+        p_t = _C_ops.elementwise_add(
+            _C_ops.elementwise_mul(pred, label),
+            _C_ops.elementwise_mul(
+                _C_ops.elementwise_sub(one, pred),
+                _C_ops.elementwise_sub(one, label)))

         alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype)
-        alpha_t = core.ops.elementwise_add(
-            core.ops.elementwise_mul(alpha, label),
-            core.ops.elementwise_mul(
-                core.ops.elementwise_sub(one, alpha),
-                core.ops.elementwise_sub(one, label)))
-        loss = core.ops.elementwise_mul(alpha_t, loss)
+        alpha_t = _C_ops.elementwise_add(
+            _C_ops.elementwise_mul(alpha, label),
+            _C_ops.elementwise_mul(
+                _C_ops.elementwise_sub(one, alpha),
+                _C_ops.elementwise_sub(one, label)))
+        loss = _C_ops.elementwise_mul(alpha_t, loss)

         gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype)
-        gamma_t = core.ops.elementwise_pow(
-            core.ops.elementwise_sub(one, p_t), gamma)
-        loss = core.ops.elementwise_mul(gamma_t, loss)
+        gamma_t = _C_ops.elementwise_pow(
+            _C_ops.elementwise_sub(one, p_t), gamma)
+        loss = _C_ops.elementwise_mul(gamma_t, loss)

         if normalizer is not None:
-            loss = core.ops.elementwise_div(loss, normalizer)
+            loss = _C_ops.elementwise_div(loss, normalizer)

         if reduction == "sum":
-            return core.ops.reduce_sum(loss, 'reduce_all', True)
+            return _C_ops.reduce_sum(loss, 'reduce_all', True)
         elif reduction == "mean":
-            return core.ops.mean(loss)
+            return _C_ops.mean(loss)
         return loss
...
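Note that the cached module keeps the old `core.ops` calling convention, which is why the loss hunks are pure find-and-replace: tensor inputs come first, followed by a flat list of alternating attribute names and values. An illustrative direct call (`_C_ops` is a private module; normal code should go through the `paddle.nn.functional` wrappers):

    import paddle
    from paddle import _C_ops  # available from this commit onward, dygraph only

    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
    # inputs first, then 'attr_name', attr_value pairs, as in the hunks above
    s = _C_ops.reduce_sum(x, 'dim', [0], 'keep_dim', False, 'reduce_all', True)
    print(s.numpy())  # [10.]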
python/paddle/nn/functional/norm.py View file @ f05098b5
...
@@ -23,6 +23,7 @@ from ...fluid.initializer import Constant
 from ...fluid.param_attr import ParamAttr
 from ...fluid import core, dygraph_utils
 import numbers
+from paddle import _C_ops

 __all__ = []
...
@@ -79,9 +80,9 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
     """
     if in_dygraph_mode():
         eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
-        out = core.ops.p_norm(x, 'axis', axis, 'porder',
-                              float(p), 'keepdim', True, 'epsilon', epsilon)
-        return x / core.ops.elementwise_max(out, eps)
+        out = _C_ops.p_norm(x, 'axis', axis, 'porder',
+                            float(p), 'keepdim', True, 'epsilon', epsilon)
+        return x / _C_ops.elementwise_max(out, eps)

     check_type(p, 'p', (float, int), 'normalize')
     check_type(axis, 'axis', (int), 'normalize')
...
@@ -185,7 +186,7 @@ def batch_norm(x,
             not training, "data_layout", data_format, "use_mkldnn", False,
             "fuse_with_relu", False, "use_global_stats", use_global_stats,
             "trainable_statistics", trainable_statistics)
-        batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
+        batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
             x, weight, bias, running_mean, running_var, mean_out, variance_out,
             *attrs)

         return dygraph_utils._append_activation_in_dygraph(
...
@@ -301,8 +302,8 @@ def layer_norm(x,
             1:] + ', but got input shape ' + str(input_shape))

     if in_dygraph_mode():
-        pre_act, _, _ = core.ops.layer_norm(x, weight, bias, 'epsilon', epsilon,
-                                            'begin_norm_axis', begin_norm_axis)
+        pre_act, _, _ = _C_ops.layer_norm(x, weight, bias, 'epsilon', epsilon,
+                                          'begin_norm_axis', begin_norm_axis)
         return dygraph_utils._append_activation_in_dygraph(pre_act, act=None)

     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
...
@@ -385,9 +386,9 @@ def instance_norm(x,
     """
     if in_dygraph_mode():
-        out, _, _ = core.ops.instance_norm(x, weight, bias, "epsilon", eps,
-                                           "momentum", momentum, "data_format",
-                                           data_format)
+        out, _, _ = _C_ops.instance_norm(x, weight, bias, "epsilon", eps,
+                                         "momentum", momentum, "data_format",
+                                         data_format)
         return out

     check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")
...
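As the `normalize` hunk shows, some wrappers compose two cached ops per call (`p_norm`, then `elementwise_max` against `eps`) rather than one fused kernel; numerically nothing changes:

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([[3.0, 4.0]])
    print(F.normalize(x, p=2, axis=1).numpy())  # [[0.6 0.8]]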
python/paddle/nn/functional/pooling.py View file @ f05098b5
...
@@ -17,6 +17,8 @@ from ...fluid import core
 from ...fluid.framework import in_dygraph_mode
 from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
 from ...fluid.data_feeder import check_type, check_variable_and_dtype
+from paddle import _C_ops
+from paddle import _C_ops

 __all__ = []
...
@@ -216,7 +218,7 @@ def avg_pool1d(x,
     padding = _expand_low_nd_padding(padding)

     if in_dygraph_mode():
-        output = core.ops.pool2d(
+        output = _C_ops.pool2d(
             x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
             False, 'strides', stride, 'paddings', padding, 'padding_algorithm',
             padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
...
@@ -327,12 +329,12 @@ def avg_pool2d(x,
         padding, 2, channel_last, ceil_mode=ceil_mode)

     if in_dygraph_mode():
-        output = core.ops.pool2d(
-            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
-            False, 'padding_algorithm', padding_algorithm, 'strides', stride,
-            'paddings', padding, 'use_cudnn', True, 'ceil_mode', ceil_mode,
-            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
-            data_format)
+        output = _C_ops.pool2d(
+            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
+            False, 'padding_algorithm', padding_algorithm, 'strides', stride,
+            'paddings', padding, 'use_cudnn', True, 'ceil_mode', ceil_mode,
+            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
+            data_format)
         if divisor_override is None:
             return output
         else:
...
@@ -446,7 +448,7 @@ def avg_pool3d(x,
         padding, 3, channel_last=channel_last, ceil_mode=ceil_mode)

     if in_dygraph_mode():
-        output = core.ops.pool3d(
+        output = _C_ops.pool3d(
             x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride,
             'paddings', padding, 'global_pooling', False, 'padding_algorithm',
             padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
...
@@ -566,7 +568,7 @@ def max_pool1d(x,
     if in_dygraph_mode():
         if return_mask:
-            pool_out = core.ops.max_pool2d_with_index(
+            pool_out = _C_ops.max_pool2d_with_index(
                 x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                 stride, 'paddings', padding, 'padding_algorithm',
                 padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
...
@@ -576,7 +578,7 @@ def max_pool1d(x,
                     squeeze(pool_out[1],
                             [2])) if return_mask else squeeze(pool_out[0], [2])
         else:
-            pool_out = core.ops.pool2d(
+            pool_out = _C_ops.pool2d(
                 x, 'pooling_type', 'max', 'ksize', kernel_size,
                 'global_pooling', False, 'padding_algorithm', padding_algorithm,
                 'strides', stride, 'paddings', padding, 'use_cudnn', True,
...
@@ -704,7 +706,7 @@ def max_pool2d(x,
     if in_dygraph_mode():
         if return_mask:
-            output = core.ops.max_pool2d_with_index(
+            output = _C_ops.max_pool2d_with_index(
                 x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                 stride, 'paddings', padding, 'padding_algorithm',
                 padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,
...
@@ -712,7 +714,7 @@ def max_pool2d(x,
                 data_format)
             return output if return_mask else output[0]
         else:
-            output = core.ops.pool2d(
+            output = _C_ops.pool2d(
                 x, 'pooling_type', 'max', 'ksize', kernel_size,
                 'global_pooling', False, 'padding_algorithm', padding_algorithm,
                 'strides', stride, 'paddings', padding, 'use_cudnn', True,
...
@@ -835,7 +837,7 @@ def max_pool3d(x,
     if in_dygraph_mode():
         if return_mask:
-            output = core.ops.max_pool3d_with_index(
+            output = _C_ops.max_pool3d_with_index(
                 x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
                 stride, 'paddings', padding, 'global_pooling', False,
                 'padding_algorithm', padding_algorithm, 'use_cudnn', True,
...
@@ -843,7 +845,7 @@ def max_pool3d(x,
                 'data_format', data_format)
             return output if return_mask else output[0]
         else:
-            output = core.ops.pool3d(
+            output = _C_ops.pool3d(
                 x, 'pooling_type', 'max', 'ksize', kernel_size,
                 'global_pooling', False, 'padding_algorithm', padding_algorithm,
                 'strides', stride, 'paddings', padding, 'use_cudnn', True,
...
@@ -932,8 +934,8 @@ def adaptive_avg_pool1d(x, output_size, name=None):
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
-        pool_out = core.ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
-                                   pool_size, 'adaptive', True)
+        pool_out = _C_ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
+                                 pool_size, 'adaptive', True)
         return squeeze(pool_out, [2])

     l_type = "pool2d"
...
@@ -1031,9 +1033,9 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
             output_size[1] = in_w

     if in_dygraph_mode():
-        output = core.ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
-                                 'global_pooling', False, 'adaptive', True,
-                                 'data_format', data_format)
+        output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
+                               'global_pooling', False, 'adaptive', True,
+                               'data_format', data_format)
         return output

     l_type = 'pool2d'
...
@@ -1137,9 +1139,9 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
             output_size[2] = in_w

     if in_dygraph_mode():
-        output = core.ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
-                                 'global_pooling', False, 'adaptive', True,
-                                 'data_format', data_format)
+        output = _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
+                               'global_pooling', False, 'adaptive', True,
+                               'data_format', data_format)
         return output

     l_type = 'pool3d'
...
@@ -1221,7 +1223,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
-        pool_out = core.ops.max_pool2d_with_index(
+        pool_out = _C_ops.max_pool2d_with_index(
             x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True)
         return (squeeze(pool_out[0], [2]), squeeze(
             pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])
...
@@ -1310,7 +1312,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
         output_size[1] = in_w
     if in_dygraph_mode():
-        pool_out = core.ops.max_pool2d_with_index(
+        pool_out = _C_ops.max_pool2d_with_index(
             x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True)
         return pool_out if return_mask else pool_out[0]
...
@@ -1403,7 +1405,7 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
         output_size[2] = in_w
     if in_dygraph_mode():
-        pool_out = core.ops.max_pool3d_with_index(
+        pool_out = _C_ops.max_pool3d_with_index(
             x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True)
         return pool_out if return_mask else pool_out[0]
...
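The pooling wrappers are the heaviest users of the flat attribute list, and several of them return a (value, index) pair from the `*_with_index` ops, which is why the hunks above select `output[0]` when no mask is requested. For example:

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([1, 1, 4, 4])
    out, mask = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
    print(out.shape, mask.shape)  # [1, 1, 2, 2] [1, 1, 2, 2]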
python/paddle/nn/functional/vision.py View file @ f05098b5
...
@@ -18,6 +18,7 @@ from ...fluid.layer_helper import LayerHelper
 from ...fluid.data_feeder import check_variable_and_dtype
 from ...fluid import dygraph_utils
 import numpy as np
+from paddle import _C_ops

 __all__ = []
...
@@ -91,9 +92,9 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
     if in_dygraph_mode():
         _out_shape = out_shape.numpy().tolist() if isinstance(
             out_shape, Variable) else out_shape
-        return core.ops.affine_grid(theta, "output_shape", _out_shape,
-                                    "align_corners", align_corners, "use_cudnn",
-                                    use_cudnn)
+        return _C_ops.affine_grid(theta, "output_shape", _out_shape,
+                                  "align_corners", align_corners, "use_cudnn",
+                                  use_cudnn)
     helper = LayerHelper('affine_grid')
     check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
...
@@ -272,7 +273,7 @@ def grid_sample(x,
     if in_dygraph_mode():
         attrs = ('mode', mode, 'padding_mode', padding_mode, 'align_corners',
                  align_corners, 'use_cudnn', use_cudnn)
-        out = getattr(core.ops, 'grid_sampler')(x, grid, *attrs)
+        out = getattr(_C_ops, 'grid_sampler')(x, grid, *attrs)
     else:
         helper = LayerHelper("grid_sample", **locals())
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')
...
@@ -328,8 +329,8 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
             data_format))

     if in_dygraph_mode():
-        return core.ops.pixel_shuffle(x, "upscale_factor", upscale_factor,
-                                      "data_format", data_format)
+        return _C_ops.pixel_shuffle(x, "upscale_factor", upscale_factor,
+                                    "data_format", data_format)

     helper = LayerHelper("pixel_shuffle", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
...
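A quick sanity check on one of the vision ops touched here, assuming a Paddle 2.x build:

    import paddle
    import paddle.nn.functional as F

    x = paddle.randn([1, 9, 4, 4])            # channels = upscale_factor ** 2
    y = F.pixel_shuffle(x, upscale_factor=3)  # rearranges depth into space
    print(y.shape)                            # [1, 1, 12, 12]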
python/paddle/nn/layer/distance.py View file @ f05098b5
...
@@ -19,6 +19,7 @@ from ...fluid.dygraph import layers
 from ...fluid.framework import core, in_dygraph_mode
 from ...fluid.data_feeder import check_variable_and_dtype, check_type
 from ...fluid.layer_helper import LayerHelper
+from paddle import _C_ops

 __all__ = []
...
@@ -78,9 +79,9 @@ class PairwiseDistance(layers.Layer):
     def forward(self, x, y):
         if in_dygraph_mode():
-            sub = core.ops.elementwise_sub(x, y)
-            return core.ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim',
-                                   self.keepdim, 'epsilon', self.epsilon)
+            sub = _C_ops.elementwise_sub(x, y)
+            return _C_ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim',
+                                 self.keepdim, 'epsilon', self.epsilon)

         check_variable_and_dtype(x, 'x', ['float32', 'float64'],
                                  'PairwiseDistance')
...
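The layer issues two cached ops per call (`elementwise_sub`, then `p_norm`); user-facing behavior stays the same:

    import paddle

    dist = paddle.nn.PairwiseDistance(p=2)
    x = paddle.to_tensor([[1.0, 2.0], [4.0, 6.0]])
    y = paddle.to_tensor([[1.0, 3.0], [0.0, 3.0]])
    print(dist(x, y).numpy())  # approximately [1., 5.]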
python/paddle/nn/layer/norm.py View file @ f05098b5
...
@@ -49,6 +49,7 @@ import numbers
 import warnings
 from ...fluid.dygraph.base import no_grad
 from .. import functional as F
+from paddle import _C_ops

 __all__ = []
...
@@ -1083,7 +1084,7 @@ class SyncBatchNorm(_BatchNormBase):
                 self._data_format, "use_mkldnn", False, "fuse_with_relu",
                 False, "use_global_stats", False, 'trainable_statistics',
                 False)
-            sync_batch_norm_out, _, _, _, _, _ = core.ops.sync_batch_norm(
+            sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm(
                 x, self.weight, self.bias, self._mean, self._variance, mean_out,
                 variance_out, *attrs)
...
python/paddle/nn/layer/rnn.py View file @ f05098b5
...
@@ -32,7 +32,7 @@ from paddle.fluid.dygraph import Layer, LayerList
 from paddle.fluid.layers import utils
 from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
 from paddle.fluid.data_feeder import convert_dtype
+from paddle import _C_ops

 __all__ = []
...
@@ -981,7 +981,7 @@ class RNNBase(LayerList):
             inputs = paddle.tensor.transpose(inputs, [1, 0, 2])

         if fluid.framework.in_dygraph_mode():
-            _, _, out, state = framework.core.ops.rnn(
+            _, _, out, state = _C_ops.rnn(
                 inputs, initial_states, self._all_weights, sequence_length,
                 self._dropout_state, self.state_components, 'dropout_prob',
                 self.dropout, 'is_bidirec', self.num_directions == 2,
...
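`_C_ops.rnn` replaces the fully qualified `framework.core.ops.rnn`; since `LSTM`, `GRU`, and `SimpleRNN` all extend `RNNBase`, they all pick up the cached lookup. Typical usage is untouched:

    import paddle

    lstm = paddle.nn.LSTM(input_size=16, hidden_size=32, num_layers=2)
    x = paddle.randn([4, 23, 16])   # [batch, time, features]
    y, (h, c) = lstm(x)             # dygraph path ends in _C_ops.rnn
    print(y.shape)                  # [4, 23, 32]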
python/paddle/nn/quant/quant_layers.py View file @ f05098b5
...
@@ -24,6 +24,7 @@ from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.nn import functional as F
 import logging
 from paddle.fluid.log_helper import get_logger
+from paddle import _C_ops

 __all__ = [
     'FakeQuantAbsMax',
...
@@ -91,8 +92,8 @@ class FakeQuantAbsMax(layers.Layer):
                 dtype=self._dtype,
                 persistable=False)
             out_scale.stop_gradient = True
-            out, _, = core.ops.fake_quantize_dequantize_abs_max(
+            out, _, = _C_ops.fake_quantize_dequantize_abs_max(
                 input, quant_out, out_scale, *attrs)
             return out

         check_variable_and_dtype(input, 'input', ['float32'], "FakeQuantAbsMax")
...
@@ -185,7 +186,7 @@ class FakeQuantMovingAverageAbsMax(layers.Layer):
             state = self._state if self.training else None
             accum = self._accum if self.training else None
-            out, _, _, _ = core.ops.fake_quantize_dequantize_moving_average_abs_max(
+            out, _, _, _ = _C_ops.fake_quantize_dequantize_moving_average_abs_max(
                 input, self._scale, accum, state, quant_out, self._scale, state,
                 accum, *attrs)
             return out
...
@@ -271,7 +272,7 @@ class FakeQuantChannelWiseAbsMax(layers.Layer):
                 persistable=False)
             out_scale.stop_gradient = True
-            out, _, = core.ops.fake_channel_wise_quantize_dequantize_abs_max(
+            out, _, = _C_ops.fake_channel_wise_quantize_dequantize_abs_max(
                 input, quant_out, out_scale, *attrs)
             return out
...
@@ -355,7 +356,7 @@ class MovingAverageAbsMaxScale(layers.Layer):
                 dtype=input.dtype,
                 persistable=False)
-            out, _, _, _ = core.ops.moving_average_abs_max_scale(
+            out, _, _, _ = _C_ops.moving_average_abs_max_scale(
                 input, accum, state, quant_out, self._scale, state, accum,
                 *attrs)
             return out
...
python/paddle/optimizer/adam.py View file @ f05098b5
...
@@ -24,6 +24,7 @@ from ..fluid.dygraph import base as imperative_base
 from collections import defaultdict
 import paddle
+from paddle import _C_ops

 __all__ = []
...
@@ -316,7 +317,7 @@ class Adam(Optimizer):
                 self._beta1, Variable) else self._beta1.numpy().item(0)
             _beta2 = self._beta2 if not isinstance(
                 self._beta2, Variable) else self._beta2.numpy().item(0)
-            _, _, _, _, _ = core.ops.adam(
+            _, _, _, _, _ = _C_ops.adam(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
...
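The optimizers issue one fused op per parameter on every step, which makes them among the hottest call sites for this lookup caching. Typical dygraph usage is unchanged:

    import paddle

    linear = paddle.nn.Linear(10, 1)
    opt = paddle.optimizer.Adam(learning_rate=0.01,
                                parameters=linear.parameters())

    loss = linear(paddle.randn([8, 10])).mean()
    loss.backward()
    opt.step()        # a single fused _C_ops.adam call per parameter
    opt.clear_grad()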
python/paddle/optimizer/lamb.py View file @ f05098b5
...
@@ -16,6 +16,7 @@ from .optimizer import Optimizer
 from ..fluid import core
 from ..fluid import framework
 from ..fluid.framework import Variable
+from paddle import _C_ops

 __all__ = []
...
@@ -199,7 +200,7 @@ class Lamb(Optimizer):
         lr = self._create_param_lr(param_and_grad)

         if framework.in_dygraph_mode():
-            _, _, _, _, _ = core.ops.lamb(
+            _, _, _, _, _ = _C_ops.lamb(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'beta1', self._beta1,
...
python/paddle/optimizer/momentum.py View file @ f05098b5
...
@@ -23,6 +23,7 @@ from ..fluid import unique_name
 from ..fluid import layers
 import paddle.fluid as fluid
 from paddle.fluid.regularizer import L2DecayRegularizer
+from paddle import _C_ops

 __all__ = []
...
@@ -293,7 +294,7 @@ class Momentum(Optimizer):
         if framework.in_dygraph_mode():
             if isinstance(param_and_grad, dict):
                 self._update_regularization(param_and_grad['weight_decay'])
-            _, _ = core.ops.momentum(
+            _, _ = _C_ops.momentum(
                 param_and_grad[0], param_and_grad[1], velocity_acc, lr,
                 param_and_grad[0], velocity_acc, 'mu', self._momentum,
                 'use_nesterov', self._use_nesterov, 'regularization_method',
...
python/paddle/optimizer/optimizer.py View file @ f05098b5
...
@@ -41,6 +41,7 @@ from ..fluid.wrapped_decorator import signature_safe_contextmanager
 from .. import compat as cpt
 from .lr import LRScheduler
 import copy
+from paddle import _C_ops

 __all__ = []
...
@@ -916,7 +917,7 @@ class Optimizer(object):
         assert regularization_term is not None

         if framework.in_dygraph_mode():
-            return core.ops.sum([grad, regularization_term])
+            return _C_ops.sum([grad, regularization_term])

         new_grad = grad
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
...
python/paddle/optimizer/sgd.py View file @ f05098b5
...
@@ -17,6 +17,7 @@ from ..fluid import core
 from ..fluid import framework
 from ..fluid.framework import Variable, name_scope
 from ..fluid.dygraph import no_grad
+from paddle import _C_ops

 __all__ = []
...
@@ -91,8 +92,8 @@ class SGD(Optimizer):
             param_and_grad = self._update_param_group(param_and_grad)
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
-            core.ops.sgd(param_and_grad[0], lr, param_and_grad[1],
-                         param_and_grad[0])
+            _C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
+                       param_and_grad[0])
             return None

         assert isinstance(block, framework.Block)
...
python/paddle/tensor/attribute.py View file @ f05098b5
...
@@ -21,6 +21,7 @@ from ..fluid.data_feeder import check_variable_and_dtype
 # TODO: define functions to get tensor attributes
 from ..fluid.layers import rank  # noqa: F401
 from ..fluid.layers import shape  # noqa: F401
+from paddle import _C_ops

 __all__ = []
...
@@ -68,7 +69,7 @@ def real(x, name=None):
         #         [4., 5., 6.]])
     """
     if in_dygraph_mode():
-        return core.ops.real(x)
+        return _C_ops.real(x)

     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
     helper = LayerHelper('real', **locals())
...
@@ -112,7 +113,7 @@ def imag(x, name=None):
         #         [3., 2., 1.]])
     """
     if in_dygraph_mode():
-        return core.ops.imag(x)
+        return _C_ops.imag(x)

     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
     helper = LayerHelper('imag', **locals())
...
python/paddle/tensor/creation.py View file @ f05098b5
...
@@ -30,6 +30,7 @@ from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varb
...
@@ -30,6 +30,7 @@ from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varb
# TODO: define functions to get create a tensor
# TODO: define functions to get create a tensor
from
..fluid.layers
import
linspace
# noqa: F401
from
..fluid.layers
import
linspace
# noqa: F401
import
paddle
import
paddle
from
paddle
import
_C_ops
__all__
=
[]
__all__
=
[]
...
@@ -207,7 +208,7 @@ def full_like(x, fill_value, dtype=None, name=None):
...
@@ -207,7 +208,7 @@ def full_like(x, fill_value, dtype=None, name=None):
dtype
=
convert_np_dtype_to_dtype_
(
dtype
)
dtype
=
convert_np_dtype_to_dtype_
(
dtype
)
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
core
.
ops
.
fill_any_like
(
x
,
'value'
,
fill_value
,
'dtype'
,
dtype
)
return
_C_
ops
.
fill_any_like
(
x
,
'value'
,
fill_value
,
'dtype'
,
dtype
)
helper
=
LayerHelper
(
"full_like"
,
**
locals
())
helper
=
LayerHelper
(
"full_like"
,
**
locals
())
check_variable_and_dtype
(
check_variable_and_dtype
(
...
@@ -639,7 +640,7 @@ def tril(x, diagonal=0, name=None):
...
@@ -639,7 +640,7 @@ def tril(x, diagonal=0, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
op
=
getattr
(
core
.
ops
,
'tril_triu'
)
op
=
getattr
(
_C_
ops
,
'tril_triu'
)
return
op
(
x
,
'diagonal'
,
diagonal
,
"lower"
,
True
)
return
op
(
x
,
'diagonal'
,
diagonal
,
"lower"
,
True
)
return
_tril_triu_op
(
LayerHelper
(
'tril'
,
**
locals
()))
return
_tril_triu_op
(
LayerHelper
(
'tril'
,
**
locals
()))
...
@@ -706,7 +707,7 @@ def triu(x, diagonal=0, name=None):
...
@@ -706,7 +707,7 @@ def triu(x, diagonal=0, name=None):
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
op
=
getattr
(
core
.
ops
,
'tril_triu'
)
op
=
getattr
(
_C_
ops
,
'tril_triu'
)
return
op
(
x
,
'diagonal'
,
diagonal
,
"lower"
,
False
)
return
op
(
x
,
'diagonal'
,
diagonal
,
"lower"
,
False
)
return
_tril_triu_op
(
LayerHelper
(
'triu'
,
**
locals
()))
return
_tril_triu_op
(
LayerHelper
(
'triu'
,
**
locals
()))
...
@@ -749,7 +750,7 @@ def meshgrid(*args, **kwargs):
...
@@ -749,7 +750,7 @@ def meshgrid(*args, **kwargs):
args
=
args
[
0
]
args
=
args
[
0
]
if
in_dygraph_mode
():
if
in_dygraph_mode
():
num
=
len
(
args
)
num
=
len
(
args
)
out
=
core
.
ops
.
meshgrid
(
list
(
args
),
num
)
out
=
_C_
ops
.
meshgrid
(
list
(
args
),
num
)
return
out
return
out
name
=
kwargs
.
get
(
"name"
,
None
)
name
=
kwargs
.
get
(
"name"
,
None
)
...
@@ -854,13 +855,13 @@ def diagflat(x, offset=0, name=None):
...
@@ -854,13 +855,13 @@ def diagflat(x, offset=0, name=None):
padding_value
=
0
padding_value
=
0
if
in_dygraph_mode
():
if
in_dygraph_mode
():
if
len
(
x
.
shape
)
==
1
:
if
len
(
x
.
shape
)
==
1
:
return
core
.
ops
.
diag_v2
(
x
,
"offset"
,
offset
,
"padding_value"
,
return
_C_
ops
.
diag_v2
(
x
,
"offset"
,
offset
,
"padding_value"
,
padding_value
)
padding_value
)
else
:
else
:
y
,
_
=
core
.
ops
.
flatten_contiguous_range
(
x
,
"start_axis"
,
0
,
y
,
_
=
_C_
ops
.
flatten_contiguous_range
(
x
,
"start_axis"
,
0
,
"stop_axis"
,
-
1
)
"stop_axis"
,
-
1
)
return
core
.
ops
.
diag_v2
(
y
,
"offset"
,
offset
,
"padding_value"
,
return
_C_
ops
.
diag_v2
(
y
,
"offset"
,
offset
,
"padding_value"
,
padding_value
)
padding_value
)
check_type
(
x
,
'x'
,
(
Variable
),
'diagflat'
)
check_type
(
x
,
'x'
,
(
Variable
),
'diagflat'
)
check_dtype
(
x
.
dtype
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
check_dtype
(
x
.
dtype
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
...
@@ -967,8 +968,8 @@ def diag(x, offset=0, padding_value=0, name=None):
...
@@ -967,8 +968,8 @@ def diag(x, offset=0, padding_value=0, name=None):
# [4]
# [4]
"""
"""
if
in_dygraph_mode
():
if
in_dygraph_mode
():
return
core
.
ops
.
diag_v2
(
x
,
"offset"
,
offset
,
"padding_value"
,
return
_C_
ops
.
diag_v2
(
x
,
"offset"
,
offset
,
"padding_value"
,
padding_value
)
padding_value
)
check_type
(
x
,
'x'
,
(
Variable
),
'diag_v2'
)
check_type
(
x
,
'x'
,
(
Variable
),
'diag_v2'
)
check_dtype
(
x
.
dtype
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
check_dtype
(
x
.
dtype
,
'x'
,
[
'float32'
,
'float64'
,
'int32'
,
'int64'
],
...
@@ -1049,8 +1050,8 @@ def empty(shape, dtype=None, name=None):
...
@@ -1049,8 +1050,8 @@ def empty(shape, dtype=None, name=None):
if
in_dygraph_mode
():
if
in_dygraph_mode
():
shape
=
utils
.
convert_shape_to_list
(
shape
)
shape
=
utils
.
convert_shape_to_list
(
shape
)
out
=
core
.
ops
.
empty
(
'shape'
,
shape
,
'dtype'
,
out
=
_C_
ops
.
empty
(
'shape'
,
shape
,
'dtype'
,
convert_np_dtype_to_dtype_
(
dtype
))
convert_np_dtype_to_dtype_
(
dtype
))
out
.
stop_gradient
=
True
out
.
stop_gradient
=
True
return
out
return
out
...
@@ -1116,8 +1117,8 @@ def empty_like(x, dtype=None, name=None):
...
@@ -1116,8 +1117,8 @@ def empty_like(x, dtype=None, name=None):
dtype
=
convert_dtype
(
dtype
)
dtype
=
convert_dtype
(
dtype
)
if
in_dygraph_mode
():
if
in_dygraph_mode
():
out
=
core
.
ops
.
empty
(
'shape'
,
x
.
shape
,
'dtype'
,
out
=
_C_
ops
.
empty
(
'shape'
,
x
.
shape
,
'dtype'
,
convert_np_dtype_to_dtype_
(
dtype
))
convert_np_dtype_to_dtype_
(
dtype
))
out
.
stop_gradient
=
True
out
.
stop_gradient
=
True
return
out
return
out
...
...
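Worth noting for readers unfamiliar with these fast paths: the cached entry points take tensor inputs positionally, followed by a flat, interleaved list of attribute names and values; they are not keyword arguments. A hedged illustration of the convention using the `full_like` hunk above (`_C_ops` is a private, version-specific module, so this is demonstration only):

    import paddle
    from paddle import _C_ops

    x = paddle.ones([2, 3])
    # Attributes ride along as alternating name/value positionals,
    # mirroring the hunk above: ('value', 2.0, 'dtype', x.dtype).
    out = _C_ops.fill_any_like(x, 'value', 2.0, 'dtype', x.dtype)
    print(out.numpy())  # a 2x3 array filled with 2.0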
python/paddle/tensor/linalg.py  (View file @ f05098b5)

@@ -20,6 +20,7 @@ from ..fluid.framework import in_dygraph_mode, _varbase_creator
 from ..fluid.layers import transpose  # noqa: F401
 from paddle.common_ops_import import core
 from paddle.common_ops_import import VarDesc
+from paddle import _C_ops
 __all__ = []

@@ -131,7 +132,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     """
     op_type = 'matmul_v2'
     if in_dygraph_mode():
-        op = getattr(core.ops, op_type)
+        op = getattr(_C_ops, op_type)
         return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
     attrs = {

@@ -244,10 +245,10 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             )
         if in_dygraph_mode():
             if dim is None:
-                return core.ops.frobenius_norm(input, 'keep_dim', keepdim,
+                return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                                'reduce_all', True)
-            return core.ops.frobenius_norm(input, 'dim', dim, 'keep_dim',
+            return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim',
                                            keepdim,
                                            'reduce_all', False)
         attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
         if dim is None:
             attrs['reduce_all'] = True

@@ -281,8 +282,8 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         """
         if in_dygraph_mode():
             if axis is None: axis = -1
-            return core.ops.p_norm(input, 'porder', porder, 'axis', axis,
+            return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
                                    'keepdim', keepdim, 'asvector', asvector)
         if porder is not None:
             check_type(porder, 'porder', (float, int), 'p_norm')
         if axis is not None:

@@ -576,7 +577,7 @@ def dot(x, y, name=None):
     op_type = 'dot'
     # skip var type check in dygraph mode to improve efficiency
     if in_dygraph_mode():
-        op = getattr(core.ops, op_type)
+        op = getattr(_C_ops, op_type)
         return op(x, y)
     assert x is not None, 'x cannot be None in {}'.format(op_type)

@@ -651,7 +652,7 @@ def t(input, name=None):
         return input
     # 2-D tensor
     perm = [1, 0]
-    out, _ = core.ops.transpose2(input, 'axis', perm)
+    out, _ = _C_ops.transpose2(input, 'axis', perm)
     return out
     check_variable_and_dtype(

@@ -713,9 +714,9 @@ def cross(x, y, axis=None, name=None):
     """
     if in_dygraph_mode():
         if axis is not None:
-            return core.ops.cross(x, y, 'dim', axis)
+            return _C_ops.cross(x, y, 'dim', axis)
         else:
-            return core.ops.cross(x, y)
+            return _C_ops.cross(x, y)
     helper = LayerHelper("cross", **locals())
     out = helper.create_variable_for_type_inference(x.dtype)

@@ -771,7 +772,7 @@ def cholesky(x, upper=False, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.cholesky(x, "upper", upper)
+        return _C_ops.cholesky(x, "upper", upper)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
     check_type(upper, 'upper', bool, 'cholesky')
     helper = LayerHelper('cholesky', **locals())

@@ -834,7 +835,7 @@ def bmm(x, y, name=None):
             format(x_shape, y_shape))
     if in_dygraph_mode():
-        return core.ops.bmm(x, y)
+        return _C_ops.bmm(x, y)
     helper = LayerHelper('bmm', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)

@@ -867,7 +868,7 @@ def histogram(input, bins=100, min=0, max=0):
         print(result) # [0, 2, 1, 0]
     """
     if in_dygraph_mode():
-        return core.ops.histogram(input, "bins", bins, "min", min, "max", max)
+        return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)
     helper = LayerHelper('histogram', **locals())
     check_variable_and_dtype(

@@ -914,7 +915,7 @@ def mv(x, vec, name=None):
         out = paddle.mv(x, vec)
     """
     if in_dygraph_mode():
-        out = core.ops.mv(x, vec)
+        out = _C_ops.mv(x, vec)
         return out
     def __check_input(x, vec):
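Several wrappers here (matmul, dot, the tril/triu pair earlier, the bitwise ops below) do not hard-code the operator name; they resolve it at runtime with `getattr`. Since `_C_ops` re-exports the same callables, `getattr(_C_ops, op_type)` is a drop-in replacement for `getattr(core.ops, op_type)`. A small sketch of the dispatch shape these wrappers share; `_dispatch` is a hypothetical helper, and the argument list varies per wrapper:

    from paddle import _C_ops

    def _dispatch(op_type, *args):
        # Resolve the C++ entry point by name, then call it with the
        # wrapper-specific positional inputs and interleaved attributes.
        op = getattr(_C_ops, op_type)
        return op(*args)

    # e.g. the matmul_v2 fast path above reduces to:
    #   _dispatch('matmul_v2', x, y, 'trans_x', False, 'trans_y', False)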
python/paddle/tensor/logic.py  (View file @ f05098b5)

@@ -27,6 +27,7 @@ from ..fluid.layers import logical_or  # noqa: F401
 from ..fluid.layers import logical_xor  # noqa: F401
 from paddle.common_ops_import import core
+from paddle import _C_ops
 __all__ = []

@@ -60,7 +61,7 @@ def equal_all(x, y, name=None):
         print(result2) # result2 = [False ]
     """
     if in_dygraph_mode():
-        return core.ops.equal_all(x, y)
+        return _C_ops.equal_all(x, y)
     helper = LayerHelper("equal_all", **locals())
     out = helper.create_variable_for_type_inference(dtype='bool')

@@ -123,9 +124,9 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.allclose(x, y, 'rtol',
+        return _C_ops.allclose(x, y, 'rtol',
                                  str(rtol), 'atol',
                                  str(atol), 'equal_nan', equal_nan)
     check_variable_and_dtype(x, "input", ['float32', 'float64'], 'allclose')
     check_variable_and_dtype(y, "input", ['float32', 'float64'], 'allclose')

@@ -174,7 +175,7 @@ def equal(x, y, name=None):
         print(result1) # result1 = [True False False]
     """
     if in_dygraph_mode():
-        return core.ops.equal(x, y)
+        return _C_ops.equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "equal")

@@ -216,7 +217,7 @@ def greater_equal(x, y, name=None):
         print(result1) # result1 = [True False True]
     """
     if in_dygraph_mode():
-        return core.ops.greater_equal(x, y)
+        return _C_ops.greater_equal(x, y)
     check_variable_and_dtype(x, "x",
                              ["bool", "float32", "float64", "int32", "int64"],

@@ -262,7 +263,7 @@ def greater_than(x, y, name=None):
         print(result1) # result1 = [False False True]
     """
     if in_dygraph_mode():
-        return core.ops.greater_than(x, y)
+        return _C_ops.greater_than(x, y)
     check_variable_and_dtype(x, "x",
                              ["bool", "float32", "float64", "int32", "int64"],

@@ -309,7 +310,7 @@ def less_equal(x, y, name=None):
         print(result1) # result1 = [True True False]
     """
     if in_dygraph_mode():
-        return core.ops.less_equal(x, y)
+        return _C_ops.less_equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "less_equal")

@@ -352,7 +353,7 @@ def less_than(x, y, name=None):
         print(result1) # result1 = [False True False]
     """
     if in_dygraph_mode():
-        return core.ops.less_than(x, y)
+        return _C_ops.less_than(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "less_than")

@@ -395,7 +396,7 @@ def not_equal(x, y, name=None):
         print(result1) # result1 = [False True True]
     """
     if in_dygraph_mode():
-        return core.ops.not_equal(x, y)
+        return _C_ops.not_equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "not_equal")

@@ -441,7 +442,7 @@ def is_tensor(x):
 def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
     if in_dygraph_mode():
-        op = getattr(core.ops, op_name)
+        op = getattr(_C_ops, op_name)
         if binary_op:
             return op(x, y)
         else:
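The payoff of all these mechanical edits is lookup cost: `core.ops.equal(x, y)` performs two attribute resolutions (`core.ops`, then `.equal`) on every call, while `_C_ops.equal` pays one module-global lookup against a dict populated once at import. A rough, standalone way to see the effect of caching an attribute chain; the classes below are plain-Python stand-ins for `core`/`core.ops`, so absolute numbers will differ from Paddle's:

    import timeit

    class Ops:          # stand-in for core.ops
        @staticmethod
        def equal(a, b):
            return a == b

    class Core:         # stand-in for core
        ops = Ops

    cached_equal = Core.ops.equal   # what the _C_ops module effectively does

    chained = timeit.timeit(lambda: Core.ops.equal(1, 1), number=1_000_000)
    cached = timeit.timeit(lambda: cached_equal(1, 1), number=1_000_000)
    print(f"chained lookup: {chained:.3f}s, cached: {cached:.3f}s")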
python/paddle/tensor/manipulation.py  (View file @ f05098b5)

@@ -32,6 +32,7 @@ from ..fluid.layers import shard_index  # noqa: F401
 from ..fluid import layers
 from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
 import paddle
+from paddle import _C_ops
 __all__ = []

@@ -151,7 +152,7 @@ def broadcast_tensors(input, name=None):
     num_inputs = len(input)
     if in_dygraph_mode():
-        return core.ops.broadcast_tensors(input, num_inputs)
+        return _C_ops.broadcast_tensors(input, num_inputs)
     check_type(input, 'input', (list, tuple), 'broadcast_tensors')
     if num_inputs < 1:

@@ -361,8 +362,8 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
         raise ValueError("The stop_axis should be larger than stat_axis")
     if in_dygraph_mode():
-        dy_out, _ = core.ops.flatten_contiguous_range(
+        dy_out, _ = _C_ops.flatten_contiguous_range(
             x, 'start_axis', start_axis,
             'stop_axis', stop_axis)
         return dy_out
     helper = LayerHelper('flatten', **locals())

@@ -403,8 +404,8 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None):
     if start_axis > stop_axis:
         raise ValueError("The stop_axis should be larger than stat_axis")
-    dy_out, _ = core.ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
+    dy_out, _ = _C_ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
                                                    'stop_axis', stop_axis)
     return dy_out

@@ -460,7 +461,7 @@ def roll(x, shifts, axis=None, name=None):
         axis = []
     if in_dygraph_mode():
-        return core.ops.roll(x, 'axis', axis, 'shifts', shifts)
+        return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)
     helper = LayerHelper("roll", **locals())
     check_type(axis, 'axis', (list, tuple), 'roll')

@@ -705,7 +706,7 @@ def squeeze_(x, axis=None, name=None):
     elif isinstance(axis, tuple):
         axis = list(axis)
-    out, _ = core.ops.squeeze2_(x, 'axes', axis)
+    out, _ = _C_ops.squeeze2_(x, 'axes', axis)
     return out

@@ -766,7 +767,7 @@ def unique(x,
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        out, inverse, indices, counts = core.ops.unique(
+        out, inverse, indices, counts = _C_ops.unique(
             x, 'dtype', attr_dtype, 'return_index', return_index,
             'return_inverse', return_inverse, 'return_counts', return_counts,
             'axis', axis, "is_sorted", True)

@@ -897,7 +898,7 @@ def unsqueeze_(x, axis, name=None):
         item.numpy().item(0) if isinstance(item, Variable) else item
         for item in axis
     ]
-    out, _ = core.ops.unsqueeze2_(x, 'axes', axis)
+    out, _ = _C_ops.unsqueeze2_(x, 'axes', axis)
     return out

@@ -951,7 +952,7 @@ def gather(x, index, axis=None, name=None):
     if in_dygraph_mode():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
-        return core.ops.gather(x, index, None, "axis", axis, "overwrite", False)
+        return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)
     check_variable_and_dtype(
         x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],

@@ -1024,7 +1025,7 @@ def unbind(input, axis=0):
     axis_ = axis if axis >= 0 else len(input_shape) + axis
     num = input_shape[axis_]
     if in_dygraph_mode():
-        return core.ops.unbind(input, num, 'axis', axis)
+        return _C_ops.unbind(input, num, 'axis', axis)
     helper = LayerHelper("unbind", **locals())
     check_type(input, 'input', (Variable), 'unbind')

@@ -1116,7 +1117,7 @@ def scatter(x, index, updates, overwrite=True, name=None):
         #  [1., 1.]]
     """
     if in_dygraph_mode():
-        return core.ops.scatter(x, index, updates, 'overwrite', overwrite)
+        return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'scatter')
     check_type(overwrite, 'overwrite', bool, 'scatter')

@@ -1138,7 +1139,7 @@ def scatter_(x, index, updates, overwrite=True, name=None):
     Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_paddle_tensor_scatter`.
     """
-    return core.ops.scatter_(x, index, updates, 'overwrite', overwrite)
+    return _C_ops.scatter_(x, index, updates, 'overwrite', overwrite)
 def scatter_nd_add(x, index, updates, name=None):

@@ -1293,7 +1294,7 @@ def tile(x, repeat_times, name=None):
         # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.tile(x, 'repeat_times', repeat_times)
+        return _C_ops.tile(x, 'repeat_times', repeat_times)
     check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
     if isinstance(repeat_times, Variable):
         assert len(repeat_times.shape) == 1, (

@@ -1376,7 +1377,7 @@ def expand_as(x, y, name=None):
         # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_as_v2(x, 'target_shape', y.shape)
+        return _C_ops.expand_as_v2(x, 'target_shape', y.shape)
     check_variable_and_dtype(
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')

@@ -1430,7 +1431,7 @@ def broadcast_to(x, shape, name=None):
         # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_v2(x, 'shape', shape)
+        return _C_ops.expand_v2(x, 'shape', shape)
     if isinstance(shape, Variable):
         assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')

@@ -1517,7 +1518,7 @@ def expand(x, shape, name=None):
         # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_v2(x, 'shape', shape)
+        return _C_ops.expand_v2(x, 'shape', shape)
     if isinstance(shape, Variable):
         assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')

@@ -1663,11 +1664,11 @@ def reshape_(x, shape, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in shape
         ]
-        out, _ = core.ops.reshape2_(x, None, 'shape', shape)
+        out, _ = _C_ops.reshape2_(x, None, 'shape', shape)
         return out
     elif isinstance(shape, Variable):
         shape.stop_gradient = True
-        out, _ = core.ops.reshape2_(x, shape)
+        out, _ = _C_ops.reshape2_(x, shape)
         return out
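A detail visible in the flatten/squeeze/unsqueeze/reshape hunks: some operators return more than one variable, and the Python wrapper keeps only the first. Ops such as `reshape2`, `squeeze2`, `transpose2` and `flatten_contiguous_range` emit an auxiliary output (the pre-op shape, kept for the backward pass) that the eager API discards. A sketch of that unpacking pattern; `_first_output` is a hypothetical helper that just restates what the hunks above do inline:

    from paddle import _C_ops

    def _first_output(op, *args):
        # Ops like reshape2_/squeeze2_ return (Out, XShape); the eager
        # wrappers bind the auxiliary XShape to '_' and return only Out.
        out, _ = op(*args)
        return out

    # e.g. out = _first_output(_C_ops.squeeze2_, x, 'axes', [1])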
python/paddle/tensor/math.py  (View file @ f05098b5)

@@ -67,6 +67,7 @@ from ..fluid.layers import lgamma  # noqa: F401
 from ..fluid.layers import multiplex  # noqa: F401
 from ..fluid import layers
+from paddle import _C_ops
 __all__ = []

@@ -91,7 +92,7 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     Please refer to :ref:`api_tensor_scale`.
     """
     _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-    return core.ops.scale_(x, 'scale',
+    return _C_ops.scale_(x, 'scale',
                            float(_scale), 'bias',
                            float(bias), 'bias_after_scale', bias_after_scale)

@@ -144,7 +145,7 @@ def pow(x, y, name=None):
     # in dynamic graph mode
     if in_dygraph_mode():
         if isinstance(y, (int, float)):
-            return core.ops.pow(x, 'factor', y)
+            return _C_ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
             return _elementwise_op_in_dygraph(
                 x, y, axis=-1, act=None, op_name='elementwise_pow')

@@ -177,7 +178,7 @@ def _elementwise_op_in_dygraph(x,
                                act=None,
                                use_mkldnn=False,
                                op_name=None):
-    op = getattr(core.ops, op_name)
+    op = getattr(_C_ops, op_name)
     out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)
     return dygraph_utils._append_activation_in_dygraph(

@@ -236,7 +237,7 @@ def add(x, y, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.elementwise_add(x, y)
+        return _C_ops.elementwise_add(x, y)
     return _elementwise_op(LayerHelper('elementwise_add', **locals()))

@@ -725,12 +726,12 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     if in_dygraph_mode():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
-            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
+            return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
                                        'reduce_all', reduce_all_flag, 'in_dtype',
                                        x.dtype, 'out_dtype',
                                        convert_np_dtype_to_dtype_(dtype))
         else:
-            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
+            return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
                                        'reduce_all', reduce_all_flag)
     attrs = {

@@ -839,7 +840,7 @@ def add_n(inputs, name=None):
     if in_dygraph_mode():
         if isinstance(inputs, Variable):
             inputs = [inputs]
-        return core.ops.sum(inputs, 'use_mkldnn', False)
+        return _C_ops.sum(inputs, 'use_mkldnn', False)
     helper = LayerHelper('add_n', **locals())
     check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')

@@ -893,7 +894,7 @@ def trunc(input, name=None):
         # [0., 0.]]))
     '''
     if in_dygraph_mode():
-        return core.ops.trunc(input)
+        return _C_ops.trunc(input)
     else:
         inputs = {"X": input}
         attrs = {}

@@ -948,7 +949,7 @@ def mm(input, mat2, name=None):
     """
     if in_dygraph_mode():
         out = _varbase_creator(dtype=input.dtype)
-        core.ops.matmul(input, mat2, out)
+        _C_ops.matmul(input, mat2, out)
         return out
     def __check_input(x, y):

@@ -1054,7 +1055,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):
     if in_dygraph_mode():
-        out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
+        out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
         return out
     inputs = {'Input': input, "X": x, "Y": y}

@@ -1121,7 +1122,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
         axis = [0]
     if in_dygraph_mode():
-        return core.ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
+        return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
     check_variable_and_dtype(x, 'x',
                              ['float32', 'float64'],

@@ -1165,7 +1166,7 @@ def inverse(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.inverse(x)
+        return _C_ops.inverse(x)
     def _check_input(x):
         check_variable_and_dtype(x, 'x',

@@ -1256,7 +1257,7 @@ def max(x, axis=None, keepdim=False, name=None):
     reduce_all = True if axis == None or axis == [] else False
     axis = axis if axis != None and axis != [] else [0]
     if in_dygraph_mode():
-        return core.ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all)
     helper = LayerHelper('max', **locals())

@@ -1345,7 +1346,7 @@ def min(x, axis=None, keepdim=False, name=None):
     reduce_all = True if axis == None or axis == [] else False
     axis = axis if axis != None and axis != [] else [0]
     if in_dygraph_mode():
-        return core.ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all)
     helper = LayerHelper('min', **locals())

@@ -1391,7 +1392,7 @@ def log1p(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.log1p(x)
+        return _C_ops.log1p(x)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
     inputs = {'X': [x]}

@@ -1440,7 +1441,7 @@ def log2(x, name=None):
         print(res) # [1.0]
     """
     if in_dygraph_mode():
-        return core.ops.log2(x)
+        return _C_ops.log2(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
     inputs = {'X': [x]}

@@ -1490,7 +1491,7 @@ def log10(x, name=None):
         print(res) # [1.0]
     """
     if in_dygraph_mode():
-        return core.ops.log10(x)
+        return _C_ops.log10(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
     inputs = {'X': [x]}

@@ -1557,7 +1558,7 @@ def clip(x, min=None, max=None, name=None):
             max = max.numpy().item(0)
         min = min_ if min is None else min
         max = max_ if max is None else max
-        return core.ops.clip(x, "min", min, "max", max)
+        return _C_ops.clip(x, "min", min, "max", max)
     if min is not None:
         check_type(min, 'min', (float, int, Variable), 'clip')

@@ -1610,7 +1611,7 @@ def clip_(x, min=None, max=None, name=None):
         max = max.numpy().item(0)
     min = fmin if min is None else min
     max = fmax if max is None else max
-    return core.ops.clip_(x, "min", min, "max", max)
+    return _C_ops.clip_(x, "min", min, "max", max)

@@ -1656,7 +1657,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
         data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
     """
     if in_dygraph_mode():
-        return core.ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
+        return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
     inputs = {'Input': [x]}
     attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}

@@ -1768,7 +1769,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
+        return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
     def __check_input(input, offset, dim1, dim2):
         check_dtype(x.dtype, 'Input',

@@ -1845,7 +1846,7 @@ ${comment}
         # [21, 24, 27, 28, 32, 36]])
     """
     if in_dygraph_mode():
-        return core.ops.kron(x, y)
+        return _C_ops.kron(x, y)
     helper = LayerHelper('kron', **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')

@@ -1906,9 +1907,9 @@ def cumsum(x, axis=None, dtype=None, name=None):
     if in_dygraph_mode():
         if axis is None:
-            return core.ops.cumsum(x, 'flatten', flatten)
+            return _C_ops.cumsum(x, 'flatten', flatten)
         else:
-            return core.ops.cumsum(x, 'axis', axis, 'flatten', flatten)
+            return _C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)
     check_type(x, 'x', (Variable), 'cumsum')
     locals_var = locals().copy()

@@ -1941,7 +1942,7 @@ def isfinite(x, name=None):
         print(out) # [False True True False True False False]
     """
     if in_dygraph_mode():
-        return core.ops.isfinite_v2(x)
+        return _C_ops.isfinite_v2(x)
     helper = LayerHelper("isfinite_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
     out = helper.create_variable_for_type_inference('bool')

@@ -1969,7 +1970,7 @@ def isinf(x, name=None):
         print(out) # [ True False False True False False False]
     """
     if in_dygraph_mode():
-        return core.ops.isinf_v2(x)
+        return _C_ops.isinf_v2(x)
     helper = LayerHelper("isinf_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
     out = helper.create_variable_for_type_inference(dtype='bool')

@@ -1997,7 +1998,7 @@ def isnan(x, name=None):
         print(out) # [False False False False False True True]
     """
     if in_dygraph_mode():
-        return core.ops.isnan_v2(x)
+        return _C_ops.isnan_v2(x)
     helper = LayerHelper("isnan_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
     out = helper.create_variable_for_type_inference(dtype='bool')

@@ -2094,7 +2095,7 @@ def sign(x, name=None):
         print(out) # [1.0, 0.0, -1.0, 1.0]
     """
     if in_dygraph_mode():
-        return core.ops.sign(x)
+        return _C_ops.sign(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
     helper = LayerHelper("sign", **locals())

@@ -2131,7 +2132,7 @@ def tanh(x, name=None):
         # [-0.37994896 -0.19737532 0.09966799 0.29131261]
     """
     if in_dygraph_mode():
-        return core.ops.tanh(x)
+        return _C_ops.tanh(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
     check_type(x, 'x', (Variable), 'tanh')

@@ -2146,7 +2147,7 @@ def tanh_(x, name=None):
     Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_tanh`.
     """
-    return core.ops.tanh_(x)
+    return _C_ops.tanh_(x)
 def increment(x, value=1.0, name=None):

@@ -2173,7 +2174,7 @@ def increment(x, value=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.increment(x, 'step', value)
+        return _C_ops.increment(x, 'step', value)
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'increment')

@@ -2255,7 +2256,7 @@ def all(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         axis = axis if axis != None and axis != [] else [0]
-        return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all_flag)
     attrs = {

@@ -2263,7 +2264,6 @@ def all(x, axis=None, keepdim=False, name=None):
         'keep_dim': keepdim,
         'reduce_all': reduce_all_flag
     }
-
     check_variable_and_dtype(x, 'x', ['bool'], 'all')

@@ -2348,7 +2348,7 @@ def any(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         axis = axis if axis != None and axis != [] else [0]
-        return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all_flag)
     attrs = {

@@ -2428,7 +2428,7 @@ def conj(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.conj(x)
+        return _C_ops.conj(x)
     check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')

@@ -2467,7 +2467,7 @@ def digamma(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.digamma(x)
+        return _C_ops.digamma(x)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
     helper = LayerHelper('digamma', **locals())

@@ -2543,7 +2543,7 @@ def atan2(y, x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.atan2(y, x)
+        return _C_ops.atan2(y, x)
     else:
         check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
         check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
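Taken together, the hunks in this file all specialize one template: each public function tries the cached eager kernel first and only falls back to graph construction via LayerHelper when building a static program. A hedged skeleton of that template; `some_op` and `some_attr` are generic placeholders, and the real wrappers add per-op dtype checks:

    from paddle import _C_ops
    from paddle.fluid.framework import in_dygraph_mode
    from paddle.fluid.layer_helper import LayerHelper

    def op_wrapper(x, some_attr, name=None):
        if in_dygraph_mode():
            # Eager fast path: call the cached C++ kernel directly.
            return _C_ops.some_op(x, 'some_attr', some_attr)
        # Static-graph path: record an operator into the current program.
        helper = LayerHelper('some_op', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(
            type='some_op',
            inputs={'X': x},
            outputs={'Out': out},
            attrs={'some_attr': some_attr})
        return out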
python/paddle/tensor/random.py  (View file @ f05098b5)

@@ -20,6 +20,7 @@ from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, check_shape
 from ..fluid.layers import utils
 import paddle
+from paddle import _C_ops
 __all__ = []

@@ -65,7 +66,7 @@ def bernoulli(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.bernoulli(x)
+        return _C_ops.bernoulli(x)
     check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")

@@ -130,8 +131,8 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
             "multinomial op is not supported on ROCM yet.")
     if in_dygraph_mode():
-        return core.ops.multinomial(x, 'num_samples', num_samples,
+        return _C_ops.multinomial(x, 'num_samples', num_samples,
                                     'replacement',
                                     replacement)
     check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")

@@ -189,10 +190,9 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.gaussian_random('shape', shape, 'mean',
+        return _C_ops.gaussian_random('shape', shape, 'mean',
                                         float(mean), 'std',
                                         float(std), 'seed', seed, 'dtype',
                                         dtype)
     check_shape(shape, op_type_for_check)
     check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)

@@ -499,9 +499,9 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.uniform_random('shape', shape, 'min',
+        return _C_ops.uniform_random('shape', shape, 'min',
                                        float(min), 'max',
                                        float(max), 'seed', seed, 'dtype', dtype)
     check_type(shape, 'shape', (list, tuple, Variable), 'uniform/rand')
     check_dtype(dtype, 'dtype', ('float32', 'float64'), 'uniform/rand')

@@ -599,8 +599,8 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.randint('shape', shape, 'low', low, 'high', high,
+        return _C_ops.randint('shape', shape, 'low', low, 'high', high,
                                 'seed',
                                 0, 'dtype', dtype)
     check_shape(shape, 'randint')
     check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')

@@ -656,7 +656,7 @@ def randperm(n, dtype="int64", name=None):
     dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        return core.ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
+        return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
     if n < 1:
         raise ValueError("The input n should be greater than 0 in randperm op.")
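The random-op hunks show a variant of the calling convention: generator ops such as gaussian_random, uniform_random, randint and randperm take no tensor inputs at all, so the argument list is attribute pairs from the first position onward, with the shape normalized to a plain Python list first. A hedged illustration mirroring the randint hunk; these are internal helpers from this same codebase and may move between versions:

    from paddle import _C_ops
    from paddle.fluid.layers import utils
    from paddle.fluid.framework import convert_np_dtype_to_dtype_

    shape = utils.convert_shape_to_list([2, 3])   # Tensors/ints -> plain ints
    dtype = convert_np_dtype_to_dtype_('int64')
    # No tensor inputs: everything is an interleaved attribute pair.
    out = _C_ops.randint('shape', shape, 'low', 0, 'high', 10,
                         'seed', 0, 'dtype', dtype)
    print(out.shape)  # [2, 3]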
python/paddle/tensor/search.py  (View file @ f05098b5)

@@ -20,6 +20,7 @@ from paddle.common_ops_import import in_dygraph_mode
 from paddle.common_ops_import import convert_np_dtype_to_dtype_
 from paddle.common_ops_import import Variable
 from paddle.common_ops_import import VarDesc
+from paddle import _C_ops
 # TODO: define searching & indexing functions of a tensor
 # from ..fluid.layers import has_inf #DEFINE_ALIAS

@@ -88,7 +89,7 @@ def argsort(x, axis=-1, descending=False, name=None):
         # [0 2 1 1]]]
     """
     if in_dygraph_mode():
-        _, ids = core.ops.argsort(x, 'axis', axis, 'descending', descending)
+        _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return ids
     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],

@@ -165,8 +166,8 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0
     if in_dygraph_mode():
-        out = core.ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
+        out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                                keepdim, 'flatten', flatten)
         return out
     helper = LayerHelper("argmax", **locals())

@@ -242,8 +243,8 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0
     if in_dygraph_mode():
-        out = core.ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
+        out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                                keepdim, 'flatten', flatten)
         return out
     helper = LayerHelper("argmin", **locals())

@@ -302,7 +303,7 @@ def index_select(x, index, axis=0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.index_select(x, index, 'dim', axis)
+        return _C_ops.index_select(x, index, 'dim', axis)
     helper = LayerHelper("index_select", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],

@@ -378,7 +379,7 @@ def nonzero(x, as_tuple=False):
     rank = len(shape)
     if in_dygraph_mode():
-        outs = core.ops.where_index(x)
+        outs = _C_ops.where_index(x)
     else:
         outs = layers.where(x)

@@ -452,7 +453,7 @@ def sort(x, axis=-1, descending=False, name=None):
         # [5. 7. 7. 9.]]]
     """
     if in_dygraph_mode():
-        out, _ = core.ops.argsort(x, 'axis', axis, 'descending', descending)
+        out, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return out
     helper = LayerHelper("sort", **locals())
     out = helper.create_variable_for_type_inference(

@@ -517,7 +518,7 @@ def where(condition, x, y, name=None):
     y_shape = list(y.shape)
     if x_shape == y_shape:
         if in_dygraph_mode():
-            return core.ops.where(condition, x, y)
+            return _C_ops.where(condition, x, y)
         else:
             helper = LayerHelper("where", **locals())
             out = helper.create_variable_for_type_inference(dtype=x.dtype)

@@ -612,7 +613,7 @@ def index_sample(x, index):
     """
     if in_dygraph_mode():
-        return core.ops.index_sample(x, index)
+        return _C_ops.index_sample(x, index)
     helper = LayerHelper("index_sample", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],

@@ -660,7 +661,7 @@ def masked_select(x, mask, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.masked_select(x, mask)
+        return _C_ops.masked_select(x, mask)
     helper = LayerHelper("masked_select", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],

@@ -732,13 +733,13 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
     if in_dygraph_mode():
         k = k.numpy().item(0) if isinstance(k, Variable) else k
         if axis is None:
-            out, indices = core.ops.top_k_v2(x, 'k',
+            out, indices = _C_ops.top_k_v2(x, 'k',
                                              int(k), 'largest', largest,
                                              'sorted',
                                              sorted)
         else:
-            out, indices = core.ops.top_k_v2(x, 'k',
+            out, indices = _C_ops.top_k_v2(x, 'k',
                                              int(k), 'axis', axis, 'largest',
                                              largest, 'sorted', sorted)
         return out, indices
     helper = LayerHelper("top_k_v2", **locals())
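One recurring preamble before these fast-path calls: attribute values must be plain Python scalars, so Tensor-valued arguments like `k` in topk (or `axis` in gather earlier) are unwrapped with `.numpy().item(0)` or `.item()` first. A small sketch of that coercion; `_to_py_scalar` is a hypothetical helper restating what the hunks do inline:

    import paddle
    from paddle.fluid.framework import Variable

    def _to_py_scalar(v):
        # Fast-path attrs are host-side values; a one-element Tensor must
        # be pulled back to a Python scalar before the kernel call.
        return v.numpy().item(0) if isinstance(v, (paddle.Tensor, Variable)) else v

    k = _to_py_scalar(paddle.to_tensor([3]))   # -> 3, a plain int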
python/paddle/tensor/stat.py
浏览文件 @
f05098b5
...
@@ -22,6 +22,7 @@ from ..fluid import layers
...
@@ -22,6 +22,7 @@ from ..fluid import layers
from
.search
import
where
from
.search
import
where
from
..fluid.data_feeder
import
convert_dtype
,
check_variable_and_dtype
,
check_type
,
check_dtype
from
..fluid.data_feeder
import
convert_dtype
,
check_variable_and_dtype
,
check_type
,
check_dtype
import
paddle
import
paddle
from
paddle
import
_C_ops
__all__
=
[]
__all__
=
[]
...
@@ -88,8 +89,8 @@ def mean(x, axis=None, keepdim=False, name=None):
        axis = [0]

    if in_dygraph_mode():
-       return core.ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
-                                   'reduce_all', reduce_all)
+       return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
+                                 'reduce_all', reduce_all)

    check_variable_and_dtype(x, 'x/input', ['float32', 'float64'],
                             'mean/reduce_mean')
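A brief sketch of `paddle.mean`, whose dygraph path now calls `_C_ops.reduce_mean` directly:

import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
paddle.mean(x)                        # 2.5
paddle.mean(x, axis=1, keepdim=True)  # [[1.5], [3.5]]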
...
@@ -236,7 +237,7 @@ def numel(x, name=None):
    """
    if in_dygraph_mode():
-       return core.ops.size(x)
+       return _C_ops.size(x)
    if not isinstance(x, Variable):
        raise TypeError("x must be a Tensor in numel")
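And a one-liner sketch for `paddle.numel`, which maps to the `size` op:

import paddle

x = paddle.ones([3, 4])
# A single-element int64 tensor holding the element count, here 12.
n = paddle.numel(x)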
...
python/paddle/vision/ops.py    View file @ f05098b5
...
@@ -21,6 +21,7 @@ from ..nn import Layer
from ..fluid.initializer import Normal
from paddle.common_ops_import import *
+from paddle import _C_ops

__all__ = [ #noqa
    'yolo_loss',
...
@@ -189,7 +190,7 @@ def yolo_loss(x,
    """
    if in_dygraph_mode() and gt_score is None:
-       loss = core.ops.yolov3_loss(
+       loss = _C_ops.yolov3_loss(
            x, gt_box, gt_label, 'anchors', anchors, 'anchor_mask', anchor_mask,
            'class_num', class_num, 'ignore_thresh', ignore_thresh,
            'downsample_ratio', downsample_ratio, 'use_label_smooth',
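A hedged usage sketch for `paddle.vision.ops.yolo_loss`; all shapes and anchor values below are illustrative only:

import paddle

# A YOLOv3 head output for 2 anchors and 80 classes, with up to
# 6 ground-truth boxes per image.
x = paddle.rand([1, 2 * (5 + 80), 13, 13])
gt_box = paddle.rand([1, 6, 4])
gt_label = paddle.randint(0, 80, [1, 6], dtype='int32')

# gt_score is left as None, so this takes the _C_ops.yolov3_loss fast path.
loss = paddle.vision.ops.yolo_loss(
    x, gt_box, gt_label,
    anchors=[10, 13, 16, 30],
    anchor_mask=[0, 1],
    class_num=80,
    ignore_thresh=0.7,
    downsample_ratio=32)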
...
@@ -372,7 +373,7 @@ def yolo_box(x,
        scale_x_y=1.)
    """
    if in_dygraph_mode():
-       boxes, scores = core.ops.yolo_box(
+       boxes, scores = _C_ops.yolo_box(
            x, img_size, 'anchors', anchors, 'class_num', class_num,
            'conf_thresh', conf_thresh, 'downsample_ratio', downsample_ratio,
            'clip_bbox', clip_bbox, 'scale_x_y', scale_x_y, 'iou_aware',
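Similarly for `paddle.vision.ops.yolo_box`, whose dygraph branch is rewritten above (illustrative inputs):

import paddle

x = paddle.rand([1, 2 * (5 + 80), 13, 13])  # illustrative head output
img_size = paddle.to_tensor([[608, 608]], dtype='int32')

boxes, scores = paddle.vision.ops.yolo_box(
    x, img_size,
    anchors=[10, 13, 16, 30],
    class_num=80,
    conf_thresh=0.01,
    downsample_ratio=32)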
...
@@ -551,11 +552,10 @@ def deform_conv2d(x,
            'im2col_step', 1)
        if use_deform_conv2d_v1:
            op_type = 'deformable_conv_v1'
-           pre_bias = getattr(core.ops, op_type)(x, offset, weight, *attrs)
+           pre_bias = getattr(_C_ops, op_type)(x, offset, weight, *attrs)
        else:
            op_type = 'deformable_conv'
-           pre_bias = getattr(core.ops, op_type)(x, offset, mask, weight,
-                                                 *attrs)
+           pre_bias = getattr(_C_ops, op_type)(x, offset, mask, weight,
+                                               *attrs)
        if bias is not None:
            out = nn.elementwise_add(pre_bias, bias, axis=1)
        else:
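A sketch showing how the two `getattr` branches above are reached from the public `paddle.vision.ops.deform_conv2d` API (shapes illustrative):

import paddle

kh, kw = 3, 3
x = paddle.rand([8, 1, 28, 28])
weight = paddle.rand([16, 1, kh, kw])
offset = paddle.rand([8, 2 * kh * kw, 26, 26])
mask = paddle.rand([8, kh * kw, 26, 26])

# Passing a mask selects the modulated 'deformable_conv' op; mask=None
# would select 'deformable_conv_v1', matching the branch above.
out = paddle.vision.ops.deform_conv2d(x, offset, weight, mask=mask)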
...
@@ -839,7 +839,7 @@ def read_file(filename, name=None):
    """
    if in_dygraph_mode():
-       return core.ops.read_file('filename', filename)
+       return _C_ops.read_file('filename', filename)

    inputs = dict()
    attrs = {'filename': filename}
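A minimal sketch of `paddle.vision.ops.read_file`; the filename is a placeholder:

import paddle

# 'fake.jpg' is a placeholder path; the call returns the file's raw bytes
# as a 1-D uint8 tensor.
img_bytes = paddle.vision.ops.read_file('fake.jpg')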
...
@@ -886,7 +886,7 @@ def decode_jpeg(x, mode='unchanged', name=None):
    """
    if in_dygraph_mode():
-       return core.ops.decode_jpeg(x, "mode", mode)
+       return _C_ops.decode_jpeg(x, "mode", mode)

    inputs = {'X': x}
    attrs = {"mode": mode}
...
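And finally `paddle.vision.ops.decode_jpeg`, sketched with the same placeholder file:

import paddle

img_bytes = paddle.vision.ops.read_file('fake.jpg')  # placeholder path
# Decodes the JPEG byte stream into an image tensor; with mode='unchanged'
# the channel count follows the file itself.
img = paddle.vision.ops.decode_jpeg(img_bytes, mode='unchanged')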