PaddlePaddle / Paddle
Commit f05098b5 (unverified)

Authored on Jul 15, 2021 by wanghuancoder; committed via GitHub on Jul 15, 2021.
cache core.ops (#34058)
* cache core.ops, test=develop
* refine, test=develop
Parent: 2850391d

Showing 51 changed files, with 578 additions and 516 deletions (+578 −516):
python/paddle/_C_ops.py  +21 −0
python/paddle/distributed/collective.py  +52 −52
python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py  +3 −2
python/paddle/distribution.py  +11 −10
python/paddle/fluid/contrib/layers/nn.py  +3 −2
python/paddle/fluid/contrib/optimizer.py  +2 −1
python/paddle/fluid/dygraph/amp/loss_scaler.py  +3 −2
python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py  +2 −1
python/paddle/fluid/dygraph/inplace_utils.py  +2 −1
python/paddle/fluid/dygraph/math_op_patch.py  +6 −5
python/paddle/fluid/dygraph/nn.py  +14 −13
python/paddle/fluid/dygraph_utils.py  +4 −3
python/paddle/fluid/layers/control_flow.py  +2 −1
python/paddle/fluid/layers/detection.py  +4 −3
python/paddle/fluid/layers/layer_function_generator.py  +3 −2
python/paddle/fluid/layers/loss.py  +10 −9
python/paddle/fluid/layers/metric_op.py  +3 −2
python/paddle/fluid/layers/nn.py  +60 −62
python/paddle/fluid/layers/tensor.py  +16 −15
python/paddle/fluid/optimizer.py  +10 −9
python/paddle/fluid/regularizer.py  +2 −1
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py  +1 −1
python/paddle/incubate/optimizer/modelaverage.py  +2 −1
python/paddle/metric/metrics.py  +3 −2
python/paddle/nn/functional/activation.py  +28 −27
python/paddle/nn/functional/common.py  +14 −14
python/paddle/nn/functional/conv.py  +6 −5
python/paddle/nn/functional/input.py  +4 −3
python/paddle/nn/functional/loss.py  +71 −70
python/paddle/nn/functional/norm.py  +10 −9
python/paddle/nn/functional/pooling.py  +27 −25
python/paddle/nn/functional/vision.py  +7 −6
python/paddle/nn/layer/distance.py  +4 −3
python/paddle/nn/layer/norm.py  +2 −1
python/paddle/nn/layer/rnn.py  +2 −2
python/paddle/nn/quant/quant_layers.py  +6 −5
python/paddle/optimizer/adam.py  +2 −1
python/paddle/optimizer/lamb.py  +2 −1
python/paddle/optimizer/momentum.py  +2 −1
python/paddle/optimizer/optimizer.py  +2 −1
python/paddle/optimizer/sgd.py  +3 −2
python/paddle/tensor/attribute.py  +3 −2
python/paddle/tensor/creation.py  +17 −16
python/paddle/tensor/linalg.py  +16 −15
python/paddle/tensor/logic.py  +12 −11
python/paddle/tensor/manipulation.py  +20 −19
python/paddle/tensor/math.py  +37 −37
python/paddle/tensor/random.py  +13 −13
python/paddle/tensor/search.py  +18 −17
python/paddle/tensor/stat.py  +4 −3
python/paddle/vision/ops.py  +7 −7
python/paddle/_C_ops.py (new file, mode 100644)

```python
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from paddle.fluid import core

__all__ = []

for name in dir(core.ops):
    globals()[name] = getattr(core.ops, name)
    __all__.append(name)
```
python/paddle/distributed/collective.py

```diff
@@ -32,6 +32,7 @@ import paddle
 from .fleet import fleet
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle import _C_ops
 import paddle.fluid.dygraph_utils as dygraph_utils

 __all__ = []
@@ -191,7 +192,7 @@ def barrier(group=None):
     temp = fill_constant([1], dtype="int32", value="1")
     if in_dygraph_mode():
-        return core.ops.barrier(temp, temp, 'ring_id', ring_id)
+        return _C_ops.barrier(temp, temp, 'ring_id', ring_id)

     op_type = 'barrier'
@@ -318,7 +319,7 @@ def wait(tensor, group=None, use_calc_stream=True):
 def _sync_calc_stream(tensor):
     if in_dygraph_mode():
-        return core.ops.c_sync_calc_stream(tensor, tensor)
+        return _C_ops.c_sync_calc_stream(tensor, tensor)

     op_type = 'c_sync_calc_stream'
@@ -332,8 +333,7 @@ def _sync_calc_stream(tensor):
 def _sync_comm_stream(tensor, ring_id=0):
     if in_dygraph_mode():
-        return core.ops.c_sync_comm_stream([tensor], [tensor], 'ring_id',
-                                           ring_id)
+        return _C_ops.c_sync_comm_stream([tensor], [tensor], 'ring_id', ring_id)

     op_type = 'c_sync_comm_stream'
@@ -391,9 +391,9 @@ def broadcast(tensor, src, group=None, use_calc_stream=True):
     assert gsrc >= 0, ("src rank out of group, need global rank")

     if in_dygraph_mode():
-        return core.ops.c_broadcast(tensor, tensor, 'root', gsrc,
-                                    'use_calc_stream', use_calc_stream,
-                                    'ring_id', ring_id)
+        return _C_ops.c_broadcast(tensor, tensor, 'root', gsrc,
+                                  'use_calc_stream', use_calc_stream,
+                                  'ring_id', ring_id)

     op_type = 'c_broadcast'
     check_variable_and_dtype(
@@ -453,17 +453,17 @@ def all_reduce(tensor, op=ReduceOp.SUM, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id
     if in_dygraph_mode():
         if op == ReduceOp.SUM:
-            return core.ops.c_allreduce_sum_(tensor, 'use_calc_stream',
-                                             use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_sum_(tensor, 'use_calc_stream',
+                                           use_calc_stream, 'ring_id', ring_id)
         elif op == ReduceOp.MAX:
-            return core.ops.c_allreduce_max_(tensor, 'use_calc_stream',
-                                             use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_max_(tensor, 'use_calc_stream',
+                                           use_calc_stream, 'ring_id', ring_id)
         elif op == ReduceOp.MIN:
-            return core.ops.c_allreduce_min_(tensor, 'use_calc_stream',
-                                             use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_min_(tensor, 'use_calc_stream',
+                                           use_calc_stream, 'ring_id', ring_id)
         elif op == ReduceOp.PROD:
-            return core.ops.c_allreduce_prod_(tensor, 'use_calc_stream',
-                                              use_calc_stream, 'ring_id', ring_id)
+            return _C_ops.c_allreduce_prod_(tensor, 'use_calc_stream',
+                                            use_calc_stream, 'ring_id', ring_id)
         else:
             raise ValueError("Unknown parameter: {}.".format(op))
@@ -539,19 +539,19 @@ def reduce(tensor, dst, op=ReduceOp.SUM, group=None, use_calc_stream=True):
     if in_dygraph_mode():
         if op == ReduceOp.SUM:
-            return core.ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
+            return _C_ops.c_reduce_sum(tensor, tensor, 'use_calc_stream',
                                        use_calc_stream, 'ring_id', ring_id,
                                        'root_id', gdst)
         elif op == ReduceOp.MAX:
-            return core.ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
+            return _C_ops.c_reduce_max(tensor, tensor, 'use_calc_stream',
                                        use_calc_stream, 'ring_id', ring_id,
                                        'root_id', gdst)
         elif op == ReduceOp.MIN:
-            return core.ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
+            return _C_ops.c_reduce_min(tensor, tensor, 'use_calc_stream',
                                        use_calc_stream, 'ring_id', ring_id,
                                        'root_id', gdst)
         elif op == ReduceOp.PROD:
-            return core.ops.c_reduce_prod(tensor, tensor, 'use_calc_stream',
+            return _C_ops.c_reduce_prod(tensor, tensor, 'use_calc_stream',
                                        use_calc_stream, 'ring_id', ring_id,
                                        'root_id', gdst)
         else:
@@ -637,7 +637,7 @@ def all_gather(tensor_list, tensor, group=None, use_calc_stream=True):
     out = helper.create_variable_for_type_inference(dtype=tensor.dtype)
     if in_dygraph_mode():
-        core.ops.c_allgather(tensor, out, 'use_calc_stream', use_calc_stream,
+        _C_ops.c_allgather(tensor, out, 'use_calc_stream', use_calc_stream,
                            'ring_id', ring_id, 'nranks', nranks)
     else:
         if not isinstance(tensor_list, list):
@@ -725,7 +725,7 @@ def scatter(tensor, tensor_list=None, src=0, group=None, use_calc_stream=True):
             tensor_list.append(tensor)
     temp = paddle.concat(tensor_list, axis=0)
     if in_dygraph_mode():
-        return core.ops.c_scatter(temp, tensor, 'use_calc_stream',
+        return _C_ops.c_scatter(temp, tensor, 'use_calc_stream',
                                 use_calc_stream, 'ring_id', ring_id, 'nranks',
                                 nranks, 'root', gsrc)
     op_type = 'c_scatter'
@@ -762,7 +762,7 @@ def _c_identity(tensor, group=None):
     ring_id = 0 if group is None else group.id

     if in_dygraph_mode():
-        return core.ops.c_identity(tensor, 'use_calc_stream', True, 'ring_id',
+        return _C_ops.c_identity(tensor, 'use_calc_stream', True, 'ring_id',
                                  ring_id, 'use_model_parallel', True)
     op_type = 'c_identity'
     helper = LayerHelper(op_type, **locals())
@@ -805,7 +805,7 @@ def _c_concat(tensor, group=None):
     nranks = _get_global_env().world_size if group is None else group.nranks

     if in_dygraph_mode():
-        return core.ops.c_concat(tensor, 'ring_id', ring_id, 'use_calc_stream',
+        return _C_ops.c_concat(tensor, 'ring_id', ring_id, 'use_calc_stream',
                                True, 'rank', rank, 'nranks', nranks,
                                'use_model_parallel', True)
@@ -853,7 +853,7 @@ def _c_split(tensor, group=None):
     nranks = _get_global_env().world_size if group is None else group.nranks

     if in_dygraph_mode():
-        return core.ops.c_split(tensor, 'use_calc_stream', True, 'ring_id',
+        return _C_ops.c_split(tensor, 'use_calc_stream', True, 'ring_id',
                               ring_id, 'rank', rank, 'nranks', nranks,
                               'use_model_parallel', True)
@@ -892,7 +892,7 @@ def _mp_allreduce(tensor,
     if in_dygraph_mode():
         if op == ReduceOp.SUM:
-            return core.ops.c_allreduce_sum_(
+            return _C_ops.c_allreduce_sum_(
                 tensor, 'use_calc_stream', use_calc_stream, 'ring_id', ring_id,
                 "use_model_parallel", use_model_parallel)
         else:
@@ -933,7 +933,7 @@ def _c_lookup_table(table, index, start_index=0, name=None):
         Tensor.
     """
     if in_dygraph_mode():
-        return core.ops.c_embedding(table, index, "start_index", start_index)
+        return _C_ops.c_embedding(table, index, "start_index", start_index)

     op_type = 'c_embedding'
     helper = LayerHelper(op_type, **locals())
@@ -1008,7 +1008,7 @@ def _c_softmax_with_cross_entropy(logits,
         label = paddle.unsqueeze(label, axis=-1)

     if in_dygraph_mode():
-        softmax, loss = core.ops.c_softmax_with_cross_entropy(
+        softmax, loss = _C_ops.c_softmax_with_cross_entropy(
             logits, label, 'ring_id', ring_id, 'rank', rank, 'nranks', nranks)
         if not return_softmax:
             return loss
@@ -1043,8 +1043,8 @@ def _linear(x, weight, bias=None, name=None):
     """
     if in_dygraph_mode():
         pre_bias = _varbase_creator(dtype=x.dtype)
-        core.ops.matmul(x, weight, pre_bias, 'transpose_X', False,
-                        'transpose_Y', False, "alpha", 1)
+        _C_ops.matmul(x, weight, pre_bias, 'transpose_X', False, 'transpose_Y',
+                      False, "alpha", 1)
         return dygraph_utils._append_bias_in_dygraph(
             pre_bias, bias, axis=len(x.shape) - 1)
     else:
@@ -1491,7 +1491,7 @@ def alltoall(in_tensor_list, out_tensor_list, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id
     temp = paddle.concat(in_tensor_list, axis=0)
     if in_dygraph_mode():
-        core.ops.alltoall_(temp, 'use_calc_stream', use_calc_stream, 'ring_id',
+        _C_ops.alltoall_(temp, 'use_calc_stream', use_calc_stream, 'ring_id',
                          ring_id)
     else:
         op_type = 'alltoall'
@@ -1557,7 +1557,7 @@ def send(tensor, dst=0, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id

     if in_dygraph_mode():
-        return core.ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
+        return _C_ops.send_v2(tensor, 'use_calc_stream', use_calc_stream,
                               'ring_id', ring_id, 'peer', dst)
     op_type = 'send_v2'
     check_variable_and_dtype(
@@ -1607,7 +1607,7 @@ def recv(tensor, src=0, group=None, use_calc_stream=True):
     ring_id = 0 if group is None else group.id

     if in_dygraph_mode():
-        return core.ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
+        return _C_ops.recv_v2(tensor, 'use_calc_stream', use_calc_stream,
                               'ring_id', ring_id, 'peer', src, 'dtype',
                               tensor.dtype, 'out_shape', tensor.shape)
     op_type = 'recv_v2'
```
python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/hybrid_parallel_gradscaler.py

```diff
@@ -22,6 +22,7 @@ from paddle.fluid.framework import Variable
 import types
 from paddle.fluid import core
 import paddle
+from paddle import _C_ops

 __all__ = []
@@ -64,7 +65,7 @@ class HybridParallelGradScaler:
             param._grad_ivar() for param in optimizer._parameter_list
             if param._grad_ivar() is not None
         ]
-        core.ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
-                                          self._found_inf)
+        _C_ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
+                                        self._found_inf)
         # allreduce_max found_inf in check_group
         if not self._use_dp_mode:
```
python/paddle/distribution.py

```diff
@@ -34,6 +34,7 @@ import numpy as np
 import warnings

 from .fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
+from paddle import _C_ops

 __all__ = ['Distribution', 'Uniform', 'Normal', 'Categorical']
@@ -151,8 +152,8 @@ class Distribution(object):
                 warnings.warn(
                     "dtype of input 'value' needs to be the same as parameters of distribution class. dtype of 'value' will be converted."
                 )
-                return core.ops.cast(value, 'in_dtype', value.dtype,
-                                     'out_dtype', param.dtype)
+                return _C_ops.cast(value, 'in_dtype', value.dtype, 'out_dtype',
+                                   param.dtype)
             return value

         check_variable_and_dtype(value, 'value', ['float32', 'float64'],
@@ -328,9 +329,9 @@ class Uniform(Distribution):
             lb_bool = self.low < value
             ub_bool = value < self.high

-            lb = core.ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
-                               value.dtype)
-            ub = core.ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
-                               value.dtype)
+            lb = _C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
+                             value.dtype)
+            ub = _C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
+                             value.dtype)
             return nn.log(lb * ub) - nn.log(self.high - self.low)
@@ -357,9 +358,9 @@ class Uniform(Distribution):
             lb_bool = self.low < value
             ub_bool = value < self.high

-            lb = core.ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
-                               value.dtype)
-            ub = core.ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
-                               value.dtype)
+            lb = _C_ops.cast(lb_bool, 'in_dtype', lb_bool.dtype, 'out_dtype',
+                             value.dtype)
+            ub = _C_ops.cast(ub_bool, 'in_dtype', ub_bool.dtype, 'out_dtype',
+                             value.dtype)
             return (lb * ub) / (self.high - self.low)
```
python/paddle/fluid/contrib/layers/nn.py

```diff
@@ -50,6 +50,7 @@ from paddle.fluid.param_attr import ParamAttr
 from paddle.fluid.framework import Variable, convert_np_dtype_to_dtype_
 from paddle.fluid.layers import slice, reshape
 import warnings
+from paddle import _C_ops

 __all__ = [
     'fused_elemwise_activation', 'sequence_topk_avg_pooling', 'var_conv_2d',
@@ -1540,7 +1541,7 @@ def bilateral_slice(x, guide, grid, has_offset, name=None):
     """
     if paddle.fluid.in_dygraph_mode():
         attrs = ('has_offset', has_offset)
-        return getattr(core.ops, "bilateral_slice")(x, grid, guide, *attrs)
+        return getattr(_C_ops, "bilateral_slice")(x, grid, guide, *attrs)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'bilateral_slice')
     check_variable_and_dtype(guide, 'guide', ['float32', 'float64'],
@@ -1616,7 +1617,7 @@ def correlation(x,
         attrs = ("pad_size", pad_size, "kernel_size", kernel_size,
                  "max_displacement", max_displacement, "stride1", stride1,
                  "stride2", stride2, "corr_type_multiply", corr_type_multiply)
-        output = getattr(core.ops, "correlation")(x, y, *attrs)
+        output = getattr(_C_ops, "correlation")(x, y, *attrs)
     else:
         helper = LayerHelper("correlation", **locals())
         output = helper.create_variable_for_type_inference(dtype=x.dtype)
```
python/paddle/fluid/contrib/optimizer.py

```diff
@@ -21,6 +21,7 @@ from paddle.fluid import unique_name
 from paddle.fluid import layers
 from paddle.fluid.layer_helper import LayerHelper
 import warnings
+from paddle import _C_ops

 __all__ = ['Momentum']
@@ -203,7 +204,7 @@ class Momentum(Optimizer):
         lr = self._create_param_lr(param_and_grad)

         if framework.in_dygraph_mode():
-            _, _ = core.ops.momentum(
+            _, _ = _C_ops.momentum(
                 param_and_grad[0], param_and_grad[1], velocity_acc, lr,
                 param_and_grad[0], velocity_acc, 'mu', self._momentum,
                 'use_nesterov', self._use_nesterov, 'regularization_method',
```
python/paddle/fluid/dygraph/amp/loss_scaler.py

```diff
@@ -20,6 +20,7 @@ from paddle.fluid.data_feeder import check_type
 from ...wrapped_decorator import signature_safe_contextmanager, wrap_decorator
 import warnings
 import numpy as np
+from paddle import _C_ops

 __all__ = ['AmpScaler']
@@ -215,7 +216,7 @@ class AmpScaler(object):
             param._grad_ivar() for param in optimizer._parameter_list
             if param._grad_ivar() is not None
         ]
-        core.ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
-                                          self._found_inf)
+        _C_ops.check_finite_and_unscale(param_grads, self._scale, param_grads,
+                                        self._found_inf)

     def _update(self):
```
python/paddle/fluid/dygraph/dygraph_to_static/partial_program.py

```diff
@@ -25,6 +25,7 @@ from paddle.fluid.dygraph.dygraph_to_static.return_transformer import RETURN_NO_
 from paddle.fluid.layers.utils import flatten
 from paddle.fluid.layers.utils import pack_sequence_as
 import paddle.compat as cpt
+from paddle import _C_ops


 class NestSequence(object):
@@ -228,7 +229,7 @@ class PartialProgramLayer:
         attrs = ('global_block', self.program.desc.block(0), 'start_op_index',
                  0, 'end_op_index', self._infer_program.desc.block(0).op_size(),
                  'is_test', not self.training)
-        core.ops.run_program(
+        _C_ops.run_program(
             self._valid_vars(in_vars),
             self._valid_vars(self._params),
             self._valid_vars(out_vars), self._tmp_scope_vec, self._double_grads,
```
python/paddle/fluid/dygraph/inplace_utils.py

```diff
@@ -16,9 +16,10 @@ from ..wrapped_decorator import wrap_decorator
 from ..framework import in_dygraph_mode
 import warnings
 import paddle
+from paddle import _C_ops


-# NOTE(pangyoki): The Inplace APIs with underline(`_`) is only valid for the method of calling `core.ops`
+# NOTE(pangyoki): The Inplace APIs with underline(`_`) is only valid for the method of calling `_C_ops`
 # in dygraph mode. If static mode is used, the inplace mechanism will not be used, and the static method
 # of the original API will be called.
 def _inplace_apis_in_dygraph_only_(func):
```
python/paddle/fluid/dygraph/math_op_patch.py

```diff
@@ -21,6 +21,7 @@ from . import no_grad
 import numpy as np
 import warnings
+from paddle import _C_ops

 _supported_int_dtype_ = [
     core.VarDesc.VarType.UINT8,
@@ -67,8 +68,8 @@ def monkey_patch_math_varbase():
     @no_grad
     def create_tensor(value, dtype, shape):
         out = _varbase_creator(dtype=dtype)
-        out = core.ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
-                                     'value', value, 'force_cpu', False)
+        out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape, 'value',
+                                   value, 'force_cpu', False)
         out.stop_gradient = True
         return out
@@ -100,10 +101,10 @@ def monkey_patch_math_varbase():
         """
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
-        return core.ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)
+        return _C_ops.cast(self, 'in_dtype', self.dtype, 'out_dtype', dtype)

     def _scalar_elementwise_op_(var, scale, bias):
-        return core.ops.scale(var, 'scale', scale, 'bias', bias)
+        return _C_ops.scale(var, 'scale', scale, 'bias', bias)

     def _neg_(var):
         return _scalar_elementwise_op_(var, -1.0, 0.0)
@@ -242,7 +243,7 @@ def monkey_patch_math_varbase():
         # 4. calculation
         axis = -1
-        math_op = getattr(core.ops, op_type)
+        math_op = getattr(_C_ops, op_type)
         return math_op(self, other_var, 'axis', axis)

     comment = OpProtoHolder.instance().get_op_proto(op_type).comment
```
python/paddle/fluid/dygraph/nn.py

```diff
@@ -33,6 +33,7 @@ import numbers
 import logging
 import os
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops

 __all__ = [
     'Conv2D', 'Conv3D', 'Pool2D', 'Linear', 'BatchNorm', 'Dropout', 'Embedding',
@@ -236,7 +237,7 @@ class Conv2D(layers.Layer):
                      'dilations', self._dilation, 'groups', self._groups
                      if self._groups else 1, 'use_cudnn', self._use_cudnn,
                      'use_mkldnn', self._use_mkldnn)
-            out = core.ops.conv2d(input, self.weight, *attrs)
+            out = _C_ops.conv2d(input, self.weight, *attrs)
             pre_bias = out

             pre_act = dygraph_utils._append_bias_in_dygraph(
@@ -866,7 +867,7 @@ class Pool2D(layers.Layer):
                      'use_cudnn', self._use_cudnn, 'ceil_mode', self._ceil_mode,
                      'use_mkldnn', self._use_mkldnn, 'exclusive',
                      self._exclusive, 'data_format', self._data_format)
-            return core.ops.pool2d(input, *attrs)
+            return _C_ops.pool2d(input, *attrs)

         check_variable_and_dtype(
             input, 'input',
             ['int8', 'uint8', 'float16', 'float32', 'float64'],
@@ -971,7 +972,7 @@ class Linear(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
             pre_bias = _varbase_creator(dtype=input.dtype)
-            core.ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
-                            'transpose_Y', False, "alpha", 1, "use_mkldnn",
-                            self._use_mkldnn)
+            _C_ops.matmul(input, self.weight, pre_bias, 'transpose_X', False,
+                          'transpose_Y', False, "alpha", 1, "use_mkldnn",
+                          self._use_mkldnn)
             pre_act = dygraph_utils._append_bias_in_dygraph(
@@ -1116,7 +1117,7 @@ class InstanceNorm(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
-            out, _, _ = core.ops.instance_norm(input, self.scale, self.bias,
-                                               'epsilon', self._epsilon)
+            out, _, _ = _C_ops.instance_norm(input, self.scale, self.bias,
+                                             'epsilon', self._epsilon)
             return out
@@ -1337,7 +1338,7 @@ class BatchNorm(layers.Layer):
                      "fuse_with_relu", self._fuse_with_relu, "use_global_stats",
                      self._use_global_stats, 'trainable_statistics',
                      self._trainable_statistics)
-            batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
+            batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
                 input, self.weight, self.bias, self._mean, self._variance,
                 mean_out, variance_out, *attrs)

             return dygraph_utils._append_activation_in_dygraph(
@@ -1488,7 +1489,7 @@ class Dropout(layers.Layer):
         if in_dygraph_mode():
             attrs = sum(attrs.items(), ())
-            out, mask = core.ops.dropout(input, *attrs)
+            out, mask = _C_ops.dropout(input, *attrs)
             return out

         out = self._helper.create_variable_for_type_inference(dtype=input.dtype)
@@ -1640,7 +1641,7 @@ class Embedding(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
-            return core.ops.lookup_table_v2(
+            return _C_ops.lookup_table_v2(
                 self.weight, input, 'is_sparse', self._is_sparse,
                 'is_distributed', self._is_distributed, 'remote_prefetch',
                 self._remote_prefetch, 'padding_idx', self._padding_idx)
@@ -1794,7 +1795,7 @@ class LayerNorm(layers.Layer):
                 1:] + ', but got input shape ' + str(input_shape))

         if in_dygraph_mode():
-            pre_act, _, _ = core.ops.layer_norm(
+            pre_act, _, _ = _C_ops.layer_norm(
                 input, self.weight, self.bias, 'epsilon', self._epsilon,
                 'begin_norm_axis', self._begin_norm_axis)
             return dygraph_utils._append_activation_in_dygraph(
@@ -1979,7 +1980,7 @@ class GRUUnit(layers.Layer):
     def forward(self, input, hidden):
         if in_dygraph_mode():
-            gate, reset_hidden_pre, updated_hidden = core.ops.gru_unit(
+            gate, reset_hidden_pre, updated_hidden = _C_ops.gru_unit(
                 input, hidden, self.weight, self.bias, 'activation',
                 self.activation, 'gate_activation', self.gate_activation)
             return updated_hidden, reset_hidden_pre, gate
@@ -2665,7 +2666,7 @@ class Conv2DTranspose(layers.Layer):
     def forward(self, input):
         if in_dygraph_mode():
-            op = getattr(core.ops, self._op_type)
+            op = getattr(_C_ops, self._op_type)
             out = op(input, self.weight, 'output_size', self._output_size,
                      'strides', self._stride, 'paddings', self._padding,
                      'dilations', self._dilation, 'groups', self._groups,
```
python/paddle/fluid/dygraph_utils.py

```diff
@@ -14,6 +14,7 @@
 from . import core
 from .framework import dygraph_only
+from paddle import _C_ops


 @dygraph_only
@@ -40,7 +41,7 @@ def _append_activation_in_dygraph(input,
     if use_mkldnn:
         attrs += ('use_mkldnn', use_mkldnn)

-    act_op = getattr(core.ops, act)
+    act_op = getattr(_C_ops, act)
     return act_op(input, *attrs)
@@ -59,5 +60,5 @@ def _append_bias_in_dygraph(input, bias=None, axis=1, use_mkldnn=False):
     if bias is None:
         return input

-    return core.ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn',
-                                    use_mkldnn)
+    return _C_ops.elementwise_add(input, bias, 'axis', axis, 'use_mkldnn',
+                                  use_mkldnn)
```
python/paddle/fluid/layers/control_flow.py

```diff
@@ -29,6 +29,7 @@ from functools import reduce, partial
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from ... import compat as cpt
 from ..backward import _infer_var_data_type_shape_
+from paddle import _C_ops

 __all__ = [
     'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than',
@@ -3805,7 +3806,7 @@ def is_empty(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.is_empty(x)
+        return _C_ops.is_empty(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                              'is_empty')
```
python/paddle/fluid/layers/detection.py

```diff
@@ -34,6 +34,7 @@ import numpy as np
 from functools import reduce
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 from paddle.utils import deprecated
+from paddle import _C_ops

 __all__ = [
     'prior_box',
@@ -2990,7 +2991,7 @@ def generate_proposals(scores,
         assert return_rois_num, "return_rois_num should be True in dygraph mode."
         attrs = ('pre_nms_topN', pre_nms_top_n, 'post_nms_topN', post_nms_top_n,
                  'nms_thresh', nms_thresh, 'min_size', min_size, 'eta', eta)
-        rpn_rois, rpn_roi_probs, rpn_rois_num = core.ops.generate_proposals(
+        rpn_rois, rpn_roi_probs, rpn_rois_num = _C_ops.generate_proposals(
             scores, bbox_deltas, im_info, anchors, variances, *attrs)
         return rpn_rois, rpn_roi_probs, rpn_rois_num
@@ -3756,7 +3757,7 @@ def distribute_fpn_proposals(fpn_rois,
         assert rois_num is not None, "rois_num should not be None in dygraph mode."
         attrs = ('min_level', min_level, 'max_level', max_level, 'refer_level',
                  refer_level, 'refer_scale', refer_scale)
-        multi_rois, restore_ind, rois_num_per_level = core.ops.distribute_fpn_proposals(
+        multi_rois, restore_ind, rois_num_per_level = _C_ops.distribute_fpn_proposals(
             fpn_rois, rois_num, num_lvl, num_lvl, *attrs)
         return multi_rois, restore_ind, rois_num_per_level
@@ -3952,7 +3953,7 @@ def collect_fpn_proposals(multi_rois,
     if in_dygraph_mode():
         assert rois_num_per_level is not None, "rois_num_per_level should not be None in dygraph mode."
         attrs = ('post_nms_topN', post_nms_top_n)
-        output_rois, rois_num = core.ops.collect_fpn_proposals(
+        output_rois, rois_num = _C_ops.collect_fpn_proposals(
             input_rois, input_scores, rois_num_per_level, *attrs)

     check_type(multi_rois, 'multi_rois', list, 'collect_fpn_proposals')
```
python/paddle/fluid/layers/layer_function_generator.py

```diff
@@ -23,6 +23,7 @@ from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable, core, convert_np_dtype_to_dtype_, in_dygraph_mode
 from ..layer_helper import LayerHelper
 from ..data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = [
     'generate_layer_fn', 'generate_activation_fn', 'generate_inplace_fn',
@@ -257,7 +258,7 @@ def generate_activation_fn(op_type):
     def func(x, name=None):
         if in_dygraph_mode():
-            op = getattr(core.ops, op_type)
+            op = getattr(_C_ops, op_type)
             return op(x)

         if op_type not in ["abs", "exp", "square"]:
@@ -297,7 +298,7 @@ def generate_inplace_fn(inplace_op_type):
     def func(x, name=None):
         if in_dygraph_mode():
-            op = getattr(core.ops, inplace_op_type)
+            op = getattr(_C_ops, inplace_op_type)
             return op(x)
         warnings.warn(
             "In static mode, {}() is the same as {}() and does not perform inplace operation.".
```
python/paddle/fluid/layers/loss.py

```diff
@@ -27,6 +27,7 @@ from ..param_attr import ParamAttr
 from ..initializer import NumpyArrayInitializer, Constant
 from .. import core
 import warnings
+from paddle import _C_ops

 __all__ = [
     'center_loss',
@@ -261,7 +262,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
         return cross_entropy2(input, label, ignore_index)

     if in_dygraph_mode():
-        return core.ops.cross_entropy(input, label, "soft_label", soft_label,
-                                      "ignore_index", ignore_index)
+        return _C_ops.cross_entropy(input, label, "soft_label", soft_label,
+                                    "ignore_index", ignore_index)

     inputs = {'X': [input], 'Label': [label]}
@@ -278,7 +279,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
 def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
     if in_dygraph_mode():
-        loss, _, _ = core.ops.cross_entropy2(input, label, 'ignore_index',
-                                             ignore_index)
+        loss, _, _ = _C_ops.cross_entropy2(input, label, 'ignore_index',
+                                           ignore_index)
         return loss
@@ -335,8 +336,8 @@ def square_error_cost(input, label):
     """
     if in_dygraph_mode():
-        minus_out = core.ops.elementwise_sub(input, label)
-        square_out = core.ops.square(minus_out)
+        minus_out = _C_ops.elementwise_sub(input, label)
+        square_out = _C_ops.square(minus_out)
         return square_out

     check_variable_and_dtype(input, "input", ['float32', 'float64'],
@@ -600,7 +601,7 @@ def warpctc(input,
             raise ValueError(
                 "input_length and label_length must not be None in dygraph mode!"
             )
-        grad, loss_out = core.ops.warpctc(
+        grad, loss_out = _C_ops.warpctc(
             input,
             label,
             input_length,
@@ -1260,12 +1261,12 @@ def softmax_with_cross_entropy(logits,
     """
     if in_dygraph_mode():
         if core.is_compiled_with_npu():
-            softmax, backprop, loss = core.ops.softmax_with_cross_entropy(
+            softmax, backprop, loss = _C_ops.softmax_with_cross_entropy(
                 logits, label, 'soft_label', soft_label, 'ignore_index',
                 ignore_index, 'numeric_stable_mode', numeric_stable_mode,
                 'axis', axis)
         else:
-            softmax, loss = core.ops.softmax_with_cross_entropy(
+            softmax, loss = _C_ops.softmax_with_cross_entropy(
                 logits, label, 'soft_label', soft_label, 'ignore_index',
                 ignore_index, 'numeric_stable_mode', numeric_stable_mode,
                 'axis', axis)
```
python/paddle/fluid/layers/metric_op.py

```diff
@@ -25,6 +25,7 @@ from .. import core
 from ..param_attr import ParamAttr
 from . import nn
 from ..data_feeder import check_variable_and_dtype
+from paddle import _C_ops

 __all__ = ['accuracy', 'auc']
@@ -84,7 +85,7 @@ def accuracy(input, label, k=1, correct=None, total=None):
             total = _varbase_creator(dtype="int32")

         topk_out, topk_indices = nn.topk(input, k=k)
-        _acc, _, _ = core.ops.accuracy(topk_out, topk_indices, label, correct,
-                                       total)
+        _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
+                                     total)
         return _acc
```
python/paddle/fluid/layers/nn.py

```diff
@@ -39,6 +39,7 @@ from ...utils import deprecated
 from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 import paddle
 from paddle.utils import deprecated
+from paddle import _C_ops

 __all__ = [
     'fc',
@@ -201,7 +202,7 @@ def _elementwise_op_in_dygraph(x,
                                act=None,
                                use_mkldnn=False,
                                op_name=None):
-    op = getattr(core.ops, op_name)
+    op = getattr(_C_ops, op_name)
     out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)

     return dygraph_utils._append_activation_in_dygraph(
@@ -1029,7 +1030,7 @@ def dropout(x,
             seed = default_main_program().random_seed
         if is_test is None:
             is_test = not _dygraph_tracer()._train_mode
-        out, mask = core.ops.dropout(
+        out, mask = _C_ops.dropout(
             x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed',
             seed is not None, 'seed', seed if seed is not None else 0,
             'dropout_implementation', dropout_implementation)
@@ -1333,7 +1334,7 @@ def softmax(input, use_cudnn=True, name=None, axis=-1):
     """
     if in_dygraph_mode():
-        return core.ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)
+        return _C_ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)

     inputs = {"X": [input]}
     attrs = {"axis": axis, "use_cudnn": use_cudnn}
@@ -4415,7 +4416,7 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):
         reduce_all = True if dim == None or dim == [] or len(dim) == len(
             input.shape) else False
         dim = dim if dim != None and dim != [] else [0]
-        return core.ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
-                                   'reduce_all', reduce_all)
+        return _C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
+                                 'reduce_all', reduce_all)
     attrs = {
         'dim': dim if dim != None and dim != [] else [0],
@@ -4898,7 +4899,7 @@ def split(input, num_or_sections, dim=-1, name=None):
             raise TypeError(
                 "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                 "received %s." % (type(num_or_sections)))
-        return core.ops.split(input, num, *attrs)
+        return _C_ops.split(input, num, *attrs)

     check_variable_and_dtype(
         input, 'input',
@@ -5133,7 +5134,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
     """
     if in_dygraph_mode():
         out = _varbase_creator(dtype=x.dtype)
-        core.ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
-                        transpose_y, 'alpha', float(alpha))
+        _C_ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
+                      transpose_y, 'alpha', float(alpha))
         return out
@@ -5265,7 +5266,7 @@ def topk(input, k, name=None):
     """
     if in_dygraph_mode():
         _k = k.numpy().item(0) if isinstance(k, Variable) else k
-        out, indices = core.ops.top_k(input, 'k', _k)
+        out, indices = _C_ops.top_k(input, 'k', _k)
         out.stop_gradient = True
         indices.stop_gradient = True
         return out, indices
@@ -5508,7 +5509,7 @@ def transpose(x, perm, name=None):
     """
     if in_dygraph_mode():
-        out, _ = core.ops.transpose2(x, 'axis', perm)
+        out, _ = _C_ops.transpose2(x, 'axis', perm)
         return out

     check_variable_and_dtype(
@@ -5790,7 +5791,7 @@ def multiplex(inputs, index, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.multiplex(index, inputs)
+        return _C_ops.multiplex(index, inputs)
     helper = LayerHelper('multiplex', **locals())

     check_type(inputs, 'inputs', (list), 'multiplex')
@@ -5976,7 +5977,7 @@ def one_hot(input, depth, allow_out_of_range=False):
             assert depth.shape == (
                 1, ), "depth of type Variable should have shape [1]"
             depth = depth.item(0)
-        out = core.ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
-                               allow_out_of_range)
+        out = _C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
+                             allow_out_of_range)
         out.stop_gradient = True
         return out
@@ -6158,10 +6159,10 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
                 item.numpy().item(0) if isinstance(item, Variable) else item
                 for item in shape
             ]
-            out, _ = core.ops.reshape2(x, None, 'shape', shape)
+            out, _ = _C_ops.reshape2(x, None, 'shape', shape)
         elif isinstance(shape, Variable):
             shape.stop_gradient = True
-            out, _ = core.ops.reshape2(x, shape)
+            out, _ = _C_ops.reshape2(x, shape)

         return dygraph_utils._append_activation_in_dygraph(out, act)
@@ -6282,7 +6283,7 @@ def squeeze(input, axes, name=None):
     """
     if in_dygraph_mode():
-        out, _ = core.ops.squeeze2(input, 'axes', axes)
+        out, _ = _C_ops.squeeze2(input, 'axes', axes)
         return out

     helper = LayerHelper("squeeze", **locals())
@@ -6342,7 +6343,7 @@ def unsqueeze(input, axes, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in axes
         ]
-        out, _ = core.ops.unsqueeze2(input, 'axes', axes)
+        out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
         return out

     check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
@@ -6865,8 +6866,7 @@ def label_smooth(label,
         raise ValueError("The value of epsilon must be between 0 and 1.")

     if in_dygraph_mode():
-        return core.ops.label_smooth(label, prior_dist, 'epsilon',
-                                     float(epsilon))
+        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))

     check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                              'label_smooth')
@@ -6957,7 +6957,7 @@ def roi_pool(input,
     """
     if in_dygraph_mode():
         assert rois_num is not None, "rois_num should not be None in dygraph mode."
-        pool_out, argmaxes = core.ops.roi_pool(
+        pool_out, argmaxes = _C_ops.roi_pool(
             input, rois, rois_num, "pooled_height", pooled_height,
             "pooled_width", pooled_width, "spatial_scale", spatial_scale)
         return pool_out, argmaxes
@@ -7045,7 +7045,7 @@ def roi_align(input,
     """
     if in_dygraph_mode():
         assert rois_num is not None, "rois_num should not be None in dygraph mode."
-        align_out = core.ops.roi_align(
+        align_out = _C_ops.roi_align(
             input, rois, rois_num, "pooled_height", pooled_height,
             "pooled_width", pooled_width, "spatial_scale", spatial_scale,
             "sampling_ratio", sampling_ratio)
@@ -8314,7 +8314,7 @@ def gather(input, index, overwrite=True):
             output = fluid.layers.gather(x, index)
     """
     if in_dygraph_mode():
-        return core.ops.gather(input, index, None, 'overwrite', overwrite)
+        return _C_ops.gather(input, index, None, 'overwrite', overwrite)

     check_variable_and_dtype(
         input, 'x',
@@ -8405,7 +8405,7 @@ def gather_nd(input, index, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.gather_nd(input, index)
+        return _C_ops.gather_nd(input, index)
     check_variable_and_dtype(input, 'input',
                              ['bool', 'float32', 'float64', 'int32', 'int64'],
                              'gather_np')
@@ -8578,7 +8578,7 @@ def scatter_nd_add(ref, index, updates, name=None):
     """
     if in_dygraph_mode():
-        op = getattr(core.ops, 'scatter_nd_add')
+        op = getattr(_C_ops, 'scatter_nd_add')
         return op(ref, index, updates)

     if ref.dtype != updates.dtype:
@@ -8724,7 +8724,7 @@ def log(x, name=None):
             # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
     """
     if in_dygraph_mode():
-        return core.ops.log(x)
+        return _C_ops.log(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
     inputs = {'X': [x]}
@@ -8764,7 +8764,7 @@ def relu(x, name=None):
                 #  [1.  2.6]]
     """
     if in_dygraph_mode():
-        return core.ops.relu(x)
+        return _C_ops.relu(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
@@ -8890,7 +8890,7 @@ def mean_iou(input, label, num_classes):
             mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes)
     """
     if in_dygraph_mode():
-        return core.ops.mean_iou(input, label, 'num_classes', num_classes)
+        return _C_ops.mean_iou(input, label, 'num_classes', num_classes)

     helper = LayerHelper('mean_iou', **locals())
     check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
@@ -9390,7 +9390,7 @@ def pad2d(input,
     if in_dygraph_mode():
         _paddings = paddings.numpy().tolist() if isinstance(
             paddings, Variable) else paddings
-        return core.ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
-                              'data_format', data_format, 'paddings', _paddings)
+        return _C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
+                            'data_format', data_format, 'paddings', _paddings)

     check_variable_and_dtype(
@@ -9587,7 +9587,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
+        return _C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')
@@ -9629,7 +9629,7 @@ def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
             result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
     """
     if in_dygraph_mode():
-        return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
+        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hard_sigmoid')
@@ -9839,7 +9839,7 @@ def brelu(x, t_min=0.0, t_max=24.0, name=None):
                 #[ 1. 10.]]
     """
     if in_dygraph_mode():
-        return core.ops.brelu(x, 't_min', t_min, 't_max', t_max)
+        return _C_ops.brelu(x, 't_min', t_min, 't_max', t_max)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')
@@ -10098,7 +10098,7 @@ def stack(x, axis=0, name=None):
     axis = 0 if axis is None else axis

     if in_dygraph_mode():
-        return core.ops.stack(x, 'axis', axis)
+        return _C_ops.stack(x, 'axis', axis)

     if not isinstance(x, list) and not isinstance(x, tuple):
         # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
@@ -10251,7 +10251,7 @@ def unstack(x, axis=0, num=None):
     if in_dygraph_mode():
         if num == None:
             num = x.shape[axis]
-        return core.ops.unstack(x, num, 'axis', int(axis), 'num', num)
+        return _C_ops.unstack(x, num, 'axis', int(axis), 'num', num)

     helper = LayerHelper('unstack', **locals())
     if num is None:
@@ -10347,7 +10347,7 @@ def expand(x, expand_times, name=None):
             expand_times_tensor = expand_times
             expand_times_tensor.stop_gradient = True

-        return core.ops.expand(x, expand_times_tensor, *attrs)
+        return _C_ops.expand(x, expand_times_tensor, *attrs)

     inputs = {"X": [x]}
     attrs = {}
@@ -10455,7 +10455,7 @@ def expand_as(x, target_tensor, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.expand_as(x, target_tensor)
+        return _C_ops.expand_as(x, target_tensor)

     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
@@ -10671,10 +10671,9 @@ def gaussian_random(shape,
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.gaussian_random('shape', shape, 'mean',
-                                        float(mean), 'std',
-                                        float(std), 'seed', seed, 'dtype',
-                                        dtype)
+        return _C_ops.gaussian_random('shape', shape, 'mean',
+                                      float(mean), 'std',
+                                      float(std), 'seed', seed, 'dtype', dtype)

     check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn')
     check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn')
@@ -10979,7 +10978,7 @@ def slice(input, axes, starts, ends):
             ends_tensor.stop_gradient = True
             infer_flags = list(-1 for i in range(len(axes)))

-        return core.ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
-                              'infer_flags', infer_flags, *attrs)
+        return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
+                            'infer_flags', infer_flags, *attrs)

     if not isinstance(starts, (list, tuple, Variable)):
@@ -11370,7 +11369,7 @@ def size(input):
     """
     if in_dygraph_mode():
-        return core.ops.size(input)
+        return _C_ops.size(input)
     check_variable_and_dtype(
         input, 'input',
         ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size")
@@ -11459,7 +11458,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     if in_dygraph_mode():
         _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-        out = core.ops.scale(x, 'scale',
-                             float(_scale), 'bias',
-                             float(bias), 'bias_after_scale', bias_after_scale)
+        out = _C_ops.scale(x, 'scale',
+                           float(_scale), 'bias',
+                           float(bias), 'bias_after_scale', bias_after_scale)
         return dygraph_utils._append_activation_in_dygraph(out)
@@ -12147,7 +12146,7 @@ Examples:
 def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
     if in_dygraph_mode():
-        op = getattr(core.ops, op_name)
+        op = getattr(_C_ops, op_name)
         if binary_op:
             return op(x, y)
         else:
@@ -12404,7 +12403,7 @@ def clip_by_norm(x, max_norm, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.clip_by_norm(x, 'max_norm', max_norm)
+        return _C_ops.clip_by_norm(x, 'max_norm', max_norm)

     helper = LayerHelper("clip_by_norm", **locals())
     check_variable_and_dtype(x, 'X', ['float32'], 'clip_by_norm')
@@ -12449,7 +12448,7 @@ def mean(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.mean(x)
+        return _C_ops.mean(x)

     helper = LayerHelper("mean", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
@@ -12530,7 +12529,7 @@ def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
-                            'y_num_col_dims', y_num_col_dims)
+        return _C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
+                          'y_num_col_dims', y_num_col_dims)

     inputs = {"X": [x], "Y": [y]}
@@ -13156,8 +13155,7 @@ def add_position_encoding(input, alpha, beta, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.add_position_encoding(input, "alpha", alpha, "beta",
-                                              beta)
+        return _C_ops.add_position_encoding(input, "alpha", alpha, "beta", beta)

     helper = LayerHelper('add_position_encoding', **locals())
     check_variable_and_dtype(input, 'input', ['float32', 'float64'],
@@ -13411,7 +13409,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
         raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. "
                          "Received Attr(data_format): {}.".format(data_format))
     if in_dygraph_mode():
-        return core.ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
-                                       shift_ratio, 'data_format', data_format)
+        return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
+                                     shift_ratio, 'data_format', data_format)

     helper = LayerHelper("temporal_shift", **locals())
@@ -14107,7 +14105,7 @@ def where(condition):
     """
     if in_dygraph_mode():
-        return core.ops.where_index(condition)
+        return _C_ops.where_index(condition)

     helper = LayerHelper("where_index", **locals())
@@ -14890,7 +14888,7 @@ def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
             print(out)  # [[0.66666667, 1.66666667,3., 4.]]
     """
     if in_dygraph_mode():
-        return core.ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
-                                   'offset', offset)
+        return _C_ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
+                                 'offset', offset)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
@@ -15045,7 +15043,7 @@ def gather_tree(ids, parents):
     """
     if in_dygraph_mode():
-        return core.ops.gather_tree(ids, parents)
+        return _C_ops.gather_tree(ids, parents)
     else:
         helper = LayerHelper('gather_tree', **locals())
         check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
@@ -15143,7 +15141,7 @@ def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.uniform_random('shape', shape, 'min',
-                                       float(min), 'max',
-                                       float(max), 'seed', seed, 'dtype', dtype)
+        return _C_ops.uniform_random('shape', shape, 'min',
+                                     float(min), 'max',
+                                     float(max), 'seed', seed, 'dtype', dtype)
```
python/paddle/fluid/layers/tensor.py

```diff
@@ -32,6 +32,7 @@ from ..data_feeder import check_variable_and_dtype, check_type, check_dtype, con
 from paddle.utils import deprecated
 from .utils import check_shape
+from paddle import _C_ops

 __all__ = [
     'create_tensor',
@@ -237,7 +238,7 @@ def cast(x, dtype):
     if in_dygraph_mode():
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
-        out = core.ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
+        out = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
         return out

     check_variable_and_dtype(x, 'x', [
@@ -313,7 +314,7 @@ def concat(input, axis=0, name=None):
         if isinstance(axis, Variable):
             axis = axis.numpy()
             axis = axis.item(0)
-        return core.ops.concat(input, 'axis', axis)
+        return _C_ops.concat(input, 'axis', axis)

     check_type(input, 'input', (list, tuple, Variable), 'concat')
     if not isinstance(input, Variable):
@@ -721,7 +722,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None):
         else:
             attrs['str_value'] = str(float(value.numpy().item(0)))

-        core.ops.fill_constant(out, 'value',
+        _C_ops.fill_constant(out, 'value',
                              float(value), 'force_cpu', force_cpu, 'dtype',
                              out.dtype, 'str_value', attrs['str_value'],
                              'shape', shape)
@@ -1281,7 +1282,7 @@ def has_inf(x):
     """
     if in_dygraph_mode():
-        return core.ops.isinf(x)
+        return _C_ops.isinf(x)

     check_type(x, 'x', (Variable), 'has_inf')
     helper = LayerHelper("isinf", **locals())
@@ -1310,7 +1311,7 @@ def has_nan(x):
     """
     if in_dygraph_mode():
-        return core.ops.isnan(x)
+        return _C_ops.isnan(x)

     check_type(x, 'x', (Variable), 'has_nan')
     helper = LayerHelper("isnan", **locals())
@@ -1422,7 +1423,7 @@ def range(start, end, step, dtype, name=None):
         step = cast(step, dtype)

     if in_dygraph_mode():
-        return core.ops.range(start, end, step)
+        return _C_ops.range(start, end, step)

     out_shape = None
     if not isinstance(start, Variable) and not isinstance(
@@ -1491,7 +1492,7 @@ def linspace(start, stop, num, dtype=None, name=None):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num)
     if in_dygraph_mode():
-        return core.ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
+        return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                                dtype)

     helper = LayerHelper("linspace", **locals())
@@ -1679,7 +1680,7 @@ def eye(num_rows,
         num_columns = num_rows
     if in_dygraph_mode():
-        out = core.ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
+        out = _C_ops.eye('dtype', dtype, 'num_rows', num_rows, 'num_columns',
                          num_columns)
     else:
@@ -1705,8 +1706,8 @@ def eye(num_rows,
         re_shape = re_shape + [num_rows, num_columns]
         expand_times = batch_shape + [1, 1]
         if in_dygraph_mode():
-            out = core.ops.reshape(out, 'shape', re_shape)
-            return core.ops.expand(out, None, 'expand_times', expand_times)
+            out = _C_ops.reshape(out, 'shape', re_shape)
+            return _C_ops.expand(out, None, 'expand_times', expand_times)

         if not isinstance(batch_shape, list):
             raise TypeError("batch_shape should be a list")
```
python/paddle/fluid/optimizer.py

```diff
@@ -43,6 +43,7 @@ from functools import cmp_to_key
 from .wrapped_decorator import signature_safe_contextmanager
 from .. import compat as cpt
 import warnings
+from paddle import _C_ops

 __all__ = [
     'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'Dpsgd', 'DecayedAdagrad',
@@ -915,7 +916,7 @@ class Optimizer(object):
         assert regularization_term is not None

         if framework.in_dygraph_mode():
-            return core.ops.sum([grad, regularization_term])
+            return _C_ops.sum([grad, regularization_term])

         new_grad = grad
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
@@ -1295,7 +1296,7 @@ class SGDOptimizer(Optimizer):
     def _append_optimize_op(self, block, param_and_grad):
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
-            core.ops.sgd(param_and_grad[0], lr, param_and_grad[1],
-                         param_and_grad[0])
+            _C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
+                       param_and_grad[0])
             return None
@@ -1420,7 +1421,7 @@ class MomentumOptimizer(Optimizer):
         lr = self._create_param_lr(param_and_grad)

         if framework.in_dygraph_mode():
-            _, _ = core.ops.momentum(param_and_grad[0], param_and_grad[1],
+            _, _ = _C_ops.momentum(param_and_grad[0], param_and_grad[1],
                                    velocity_acc, lr, param_and_grad[0],
                                    velocity_acc, 'mu', self._momentum,
                                    'use_nesterov', self._use_nesterov)
@@ -2447,7 +2448,7 @@ class AdamOptimizer(Optimizer):
             _beta1 = self._beta1 if not isinstance(
                 self._beta1, Variable) else self._beta1.numpy().item(0)
             _beta2 = self._beta2 if not isinstance(
                 self._beta2, Variable) else self._beta2.numpy().item(0)
-            _, _, _, _, _ = core.ops.adam(
+            _, _, _, _, _ = _C_ops.adam(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
@@ -3510,7 +3511,7 @@ class LambOptimizer(AdamOptimizer):
         lr = self._create_param_lr(param_and_grad)

         if framework.in_dygraph_mode():
-            _, _, _, _, _ = core.ops.lamb(
+            _, _, _, _, _ = _C_ops.lamb(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'beta1', self._beta1,
```
python/paddle/fluid/regularizer.py

```diff
@@ -18,6 +18,7 @@ import logging
 from . import framework
 from .framework import in_dygraph_mode, _varbase_creator
 from . import core
+from paddle import _C_ops

 __all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']
@@ -133,7 +134,7 @@ class L2DecayRegularizer(WeightDecayRegularizer):
         assert isinstance(block, framework.Block)

         if framework.in_dygraph_mode():
-            return core.ops.scale(param, "scale", self._regularization_coeff)
+            return _C_ops.scale(param, "scale", self._regularization_coeff)
        else:
             decay = block.create_var(
                 dtype=param.dtype, shape=param.shape, lod_level=param.lod_level)
```
python/paddle/fluid/tests/unittests/test_deprecated_decorator.py

```diff
@@ -23,7 +23,7 @@ import paddle.fluid.core as core
 import sys
 import warnings
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops

 LOWEST_WARNING_POSTION = 3
 ERROR_WARNING_POSTION = sys.maxsize
```
python/paddle/incubate/optimizer/modelaverage.py

```diff
@@ -20,6 +20,7 @@ import paddle
 import numpy as np
 from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid.wrapped_decorator import signature_safe_contextmanager
+from paddle import _C_ops

 __all__ = []
@@ -226,7 +227,7 @@ class ModelAverage(Optimizer):
                                             param_and_grad[0])
         num_updates = self._get_accumulator('num_updates', param_and_grad[0])
         if framework.in_dygraph_mode():
-            _, _, _, _, _, _ = core.ops.average_accumulates(
+            _, _, _, _, _, _ = _C_ops.average_accumulates(
                 param_and_grad[0], sum_1, sum_2, sum_3, num_accumulates,
                 old_num_accumulates, num_updates, sum_1, sum_2, sum_3,
                 num_accumulates, old_num_accumulates, num_updates,
```
python/paddle/metric/metrics.py
@@ -25,6 +25,7 @@ from ..fluid.layer_helper import LayerHelper
 from ..fluid.layers.nn import topk
 from ..fluid.framework import core, _varbase_creator, in_dygraph_mode
 import paddle
+from paddle import _C_ops
 __all__ = []

@@ -798,7 +799,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
             total = _varbase_creator(dtype="int32")
         topk_out, topk_indices = topk(input, k=k)
-        _acc, _, _ = core.ops.accuracy(topk_out, topk_indices, label, correct,
+        _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                        total)
         return _acc
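For reference, the public entry point that exercises the patched fast path (values illustrative):

import paddle

pred = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]])
label = paddle.to_tensor([[1], [0]])
# accuracy() runs topk on the predictions and then the accuracy op above.
acc = paddle.metric.accuracy(input=pred, label=label, k=1)
print(acc.numpy())  # [1.]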
python/paddle/nn/functional/activation.py
@@ -26,6 +26,7 @@ from ...fluid.framework import in_dygraph_mode, convert_np_dtype_to_dtype_
 from ...fluid import core
 from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
 import paddle
+from paddle import _C_ops
 __all__ = []

@@ -60,7 +61,7 @@ def elu(x, alpha=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.elu(x, 'alpha', alpha)
+        return _C_ops.elu(x, 'alpha', alpha)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
     helper = LayerHelper("elu", **locals())

@@ -79,7 +80,7 @@ def elu_(x, alpha=1.0, name=None):
     Inplace version of ``elu`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_nn_cn_elu`.
     """
-    return core.ops.elu_(x, 'alpha', alpha)
+    return _C_ops.elu_(x, 'alpha', alpha)

 def gelu(x, approximate=False, name=None):

@@ -123,7 +124,7 @@ def gelu(x, approximate=False, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.gelu(x, 'approximate', approximate)
+        return _C_ops.gelu(x, 'approximate', approximate)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
     helper = LayerHelper("gelu", **locals())

@@ -171,7 +172,7 @@ def hardshrink(x, threshold=0.5, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.hard_shrink(x, 'threshold', threshold)
+        return _C_ops.hard_shrink(x, 'threshold', threshold)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardshrink')

@@ -219,7 +220,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.brelu(x, 't_min', min, 't_max', max)
+        return _C_ops.brelu(x, 't_min', min, 't_max', max)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardtanh')

@@ -274,7 +275,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
+        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardsigmoid')

@@ -328,7 +329,7 @@ def hardswish(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.hard_swish(x)
+        return _C_ops.hard_swish(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardswish')

@@ -373,7 +374,7 @@ def leaky_relu(x, negative_slope=0.01, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.leaky_relu(x, 'alpha', negative_slope)
+        return _C_ops.leaky_relu(x, 'alpha', negative_slope)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'leaky_relu')

@@ -447,7 +448,7 @@ def prelu(x, weight, name=None):
             mode = 'channel'
     if in_dygraph_mode():
-        return core.ops.prelu(x, weight, 'mode', mode)
+        return _C_ops.prelu(x, weight, 'mode', mode)
     helper = LayerHelper('prelu', **locals())
     out = helper.create_variable_for_type_inference(x.dtype)

@@ -488,7 +489,7 @@ def relu(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.relu(x)
+        return _C_ops.relu(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
     helper = LayerHelper('relu', **locals())

@@ -503,7 +504,7 @@ def relu_(x, name=None):
     Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_nn_cn_relu`.
     """
-    return core.ops.relu_(x)
+    return _C_ops.relu_(x)

 def log_sigmoid(x, name=None):

@@ -533,7 +534,7 @@ def log_sigmoid(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.logsigmoid(x)
+        return _C_ops.logsigmoid(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'log_sigmoid')

@@ -597,7 +598,7 @@ def maxout(x, groups, axis=1, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.maxout(x, 'groups', groups, 'axis', axis)
+        return _C_ops.maxout(x, 'groups', groups, 'axis', axis)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
     if axis not in [1, -1, 3]:

@@ -646,7 +647,7 @@ def relu6(x, name=None):
     """
     threshold = 6.0
     if in_dygraph_mode():
-        return core.ops.relu6(x, 'threshold', threshold)
+        return _C_ops.relu6(x, 'threshold', threshold)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')
     helper = LayerHelper('relu6', **locals())

@@ -703,7 +704,7 @@ def selu(x,
             "The alpha must be no less than zero. Received: {}.".format(alpha))
     if in_dygraph_mode():
-        return core.ops.selu(x, 'scale', scale, 'alpha', alpha)
+        return _C_ops.selu(x, 'scale', scale, 'alpha', alpha)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
     helper = LayerHelper('selu', **locals())

@@ -741,7 +742,7 @@ def silu(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.silu(x)
+        return _C_ops.silu(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
     helper = LayerHelper("silu", **locals())

@@ -872,8 +873,8 @@ def softmax(x, axis=-1, dtype=None, name=None):
     if in_dygraph_mode():
         outs_cast = x if dtype is None \
-            else core.ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return core.ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', use_cudnn)
+            else _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
+        return _C_ops.softmax(outs_cast, 'axis', axis, 'use_cudnn', use_cudnn)
     if dtype is None:
         check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],

@@ -913,7 +914,7 @@ def softmax_(x, axis=-1, dtype=None, name=None):
     if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
         dtype = convert_np_dtype_to_dtype_(dtype)
     use_cudnn = True
-    return core.ops.softmax_(x, 'axis', axis, 'use_cudnn', use_cudnn)
+    return _C_ops.softmax_(x, 'axis', axis, 'use_cudnn', use_cudnn)

 def softplus(x, beta=1, threshold=20, name=None):

@@ -946,7 +947,7 @@ def softplus(x, beta=1, threshold=20, name=None):
             out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355]
     """
     if in_dygraph_mode():
-        return core.ops.softplus(x, 'beta', beta, 'threshold', threshold)
+        return _C_ops.softplus(x, 'beta', beta, 'threshold', threshold)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'softplus')

@@ -998,7 +999,7 @@ def softshrink(x, threshold=0.5, name=None):
                 threshold))
     if in_dygraph_mode():
-        return core.ops.softshrink(x, 'lambda', threshold)
+        return _C_ops.softshrink(x, 'lambda', threshold)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'softshrink')

@@ -1039,7 +1040,7 @@ def softsign(x, name=None):
             out = F.softsign(x) # [-0.285714, -0.166667, 0.0909091, 0.230769]
     """
     if in_dygraph_mode():
-        return core.ops.softsign(x)
+        return _C_ops.softsign(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'softsign')

@@ -1077,7 +1078,7 @@ def swish(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.swish(x, 'beta', 1.0)
+        return _C_ops.swish(x, 'beta', 1.0)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
     helper = LayerHelper('swish', **locals())

@@ -1117,7 +1118,7 @@ def tanhshrink(x, name=None):
             out = F.tanhshrink(x) # [-0.020051, -0.00262468, 0.000332005, 0.00868739]
     """
     if in_dygraph_mode():
-        return core.ops.tanh_shrink(x)
+        return _C_ops.tanh_shrink(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'tanhshrink')

@@ -1159,7 +1160,7 @@ def thresholded_relu(x, threshold=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.thresholded_relu(x, 'threshold', threshold)
+        return _C_ops.thresholded_relu(x, 'threshold', threshold)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'thresholded_relu')

@@ -1234,8 +1235,8 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
     if in_dygraph_mode():
         if dtype is not None:
-            x = core.ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
-        return core.ops.log_softmax(x, 'axis', axis)
+            x = _C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
+        return _C_ops.log_softmax(x, 'axis', axis)
     if dtype is None:
         check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
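Every activation above has the same shape: in dygraph, forward straight to the bound op, passing attributes as flat name/value pairs. The public call and the patched fast path agree; a small sketch (dygraph is the default mode in 2.x):

import paddle
import paddle.nn.functional as F
from paddle import _C_ops

x = paddle.to_tensor([-1.0, 0.5])
y_api = F.elu(x, alpha=1.0)
# The fast path taken inside F.elu after this patch.
y_ops = _C_ops.elu(x, 'alpha', 1.0)
print(paddle.allclose(y_api, y_ops).numpy())  # [ True]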
python/paddle/nn/functional/common.py
@@ -33,6 +33,7 @@ from ...fluid.framework import in_dygraph_mode
 from ...fluid import core, dygraph_utils
 from ...fluid import core, layers
 from ...fluid.data_feeder import check_variable_and_dtype
+from paddle import _C_ops
 __all__ = []

@@ -452,15 +453,15 @@ def interpolate(x,
         dy_attr = tuple(attr_list)
         if resample_type == "linear":
-            out = core.ops.linear_interp_v2(x, *dy_attr)
+            out = _C_ops.linear_interp_v2(x, *dy_attr)
         elif resample_type == "bilinear":
-            out = core.ops.bilinear_interp_v2(x, *dy_attr)
+            out = _C_ops.bilinear_interp_v2(x, *dy_attr)
         elif resample_type == "trilinear":
-            out = core.ops.trilinear_interp_v2(x, *dy_attr)
+            out = _C_ops.trilinear_interp_v2(x, *dy_attr)
         elif resample_type == "nearest":
-            out = core.ops.nearest_interp_v2(x, *dy_attr)
+            out = _C_ops.nearest_interp_v2(x, *dy_attr)
         elif resample_type == "bicubic":
-            out = core.ops.bicubic_interp_v2(x, *dy_attr)
+            out = _C_ops.bicubic_interp_v2(x, *dy_attr)
         return out
     out = helper.create_variable_for_type_inference(dtype)
     helper.append_op(

@@ -710,7 +711,7 @@ def bilinear(x1, x2, weight, bias=None, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.bilinear_tensor_product(x1, x2, weight, bias)
+        return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
     check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
     check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')

@@ -884,7 +885,7 @@ def dropout(x,
     if in_dygraph_mode():
         if default_main_program().random_seed != 0:
             seed = default_main_program().random_seed
-        out, mask = core.ops.dropout(
+        out, mask = _C_ops.dropout(
             x, 'dropout_prob', p, 'is_test', not training, 'fix_seed',
             seed is not None, 'seed', seed if seed is not None else 0,
             'dropout_implementation', mode)

@@ -1316,7 +1317,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
     if in_dygraph_mode():
         if isinstance(pad, Variable):
             pad = pad.numpy()
-        out = core.ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
+        out = _C_ops.pad3d(x, "paddings", pad, "mode", mode, "value", value,
                              "data_format", data_format, "name", name)
     else:
         attrs = {'mode': mode, 'value': value, 'data_format': data_format}

@@ -1447,13 +1448,13 @@ def linear(x, weight, bias=None, name=None):
     """
     if in_dygraph_mode():
         pre_bias = _varbase_creator(dtype=x.dtype)
-        core.ops.matmul(x, weight, pre_bias, 'transpose_X', False,
-                        'transpose_Y', False, "alpha", 1)
+        _C_ops.matmul(x, weight, pre_bias, 'transpose_X', False,
+                      'transpose_Y', False, "alpha", 1)
         if bias is None:
             return pre_bias
-        return core.ops.elementwise_add(pre_bias, bias)
+        return _C_ops.elementwise_add(pre_bias, bias)
     else:
         helper = LayerHelper('linear', **locals())
         dtype = x.dtype

@@ -1546,8 +1547,7 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
         raise ValueError("The value of epsilon must be between 0 and 1.")
     if in_dygraph_mode():
-        return core.ops.label_smooth(label, prior_dist, 'epsilon',
-                                     float(epsilon))
+        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))
     check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                              'label_smooth')
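The linear hunk makes the dygraph decomposition explicit: matmul into a pre-created output variable, then elementwise_add for the bias. Equivalence check through the public API (values illustrative):

import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3])
w = paddle.randn([3, 4])
b = paddle.randn([4])
# linear()'s dygraph branch above is matmul followed by elementwise_add.
print(paddle.allclose(F.linear(x, w, b), paddle.matmul(x, w) + b).numpy())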
python/paddle/nn/functional/conv.py
@@ -22,6 +22,7 @@ from ...fluid.layers import nn, utils
 from ...fluid.data_feeder import check_variable_and_dtype
 from ...fluid.param_attr import ParamAttr
 from ...fluid.layer_helper import LayerHelper
+from paddle import _C_ops
 __all__ = []

@@ -115,7 +116,7 @@ def _conv_nd(x,
                  use_mkldnn, 'fuse_relu_before_depthwise_conv', False,
                  "padding_algorithm", padding_algorithm, "data_format",
                  data_format)
-        pre_bias = getattr(core.ops, op_type)(x, weight, *attrs)
+        pre_bias = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(pre_bias, bias, axis=channel_dim)
         else:

@@ -339,7 +340,7 @@ def conv1d(x,
                  'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn', False,
                  'fuse_relu_before_depthwise_conv', False, "padding_algorithm",
                  padding_algorithm, "data_format", conv2d_data_format)
-        out = getattr(core.ops, l_type)(x, weight, *attrs)
+        out = getattr(_C_ops, l_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(out, bias, axis=channel_dim)
         else:

@@ -775,7 +776,7 @@ def conv1d_transpose(x,
                  'strides', stride, 'paddings', padding, 'padding_algorithm',
                  padding_algorithm, 'dilations', dilation, 'groups', groups,
                  'use_cudnn', use_cudnn, 'data_format', conv2d_data_format)
-        out = getattr(core.ops, op_type)(x, weight, *attrs)
+        out = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(out, bias, axis=channel_dim)
         else:

@@ -1010,7 +1011,7 @@ def conv2d_transpose(x,
                  'strides', stride, 'paddings', padding, 'padding_algorithm',
                  padding_algorithm, 'dilations', dilation, 'groups', groups,
                  'use_cudnn', use_cudnn, 'data_format', data_format)
-        pre_bias = getattr(core.ops, op_type)(x, weight, *attrs)
+        pre_bias = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(pre_bias, bias, axis=channel_dim)
         else:

@@ -1402,7 +1403,7 @@ def conv3d_transpose(x,
                  'paddings', padding, "padding_algorithm", padding_algorithm,
                  'strides', stride, 'dilations', dilation, 'groups', groups,
                  'use_cudnn', use_cudnn, "data_format", data_format_)
-        pre_bias = getattr(core.ops, op_type)(x, weight, *attrs)
+        pre_bias = getattr(_C_ops, op_type)(x, weight, *attrs)
         if bias is not None:
             out = nn.elementwise_add(pre_bias, bias, axis=channel_dim)
         else:
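All of the conv paths resolve the operator by name with getattr(_C_ops, op_type), because the concrete op (conv2d, depthwise_conv2d, the transposed variants, ...) is only chosen at runtime from groups, channels and data format. Plain usage of one public wrapper (shapes illustrative):

import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 8, 8])
w = paddle.randn([6, 3, 3, 3])
# In dygraph this ends in getattr(_C_ops, op_type)(x, w, *attrs).
y = F.conv2d(x, w, stride=1, padding=1)
print(y.shape)  # [1, 6, 8, 8]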
python/paddle/nn/functional/input.py
@@ -18,6 +18,7 @@ from ...fluid.framework import Variable, in_dygraph_mode
 from ...fluid.layer_helper import LayerHelper
 from ...fluid.layers import core
 from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
+from paddle import _C_ops
 __all__ = []

@@ -86,8 +87,8 @@ def one_hot(x, num_classes, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.one_hot_v2(x, 'depth', num_classes,
-                                   'allow_out_of_range', False)
+        return _C_ops.one_hot_v2(x, 'depth', num_classes,
+                                 'allow_out_of_range', False)
     else:
         check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
         helper = LayerHelper("one_hot_v2", **locals())

@@ -195,7 +196,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
                 weight.shape[0], weight.shape[0]))
     if in_dygraph_mode():
-        return core.ops.lookup_table_v2(
+        return _C_ops.lookup_table_v2(
             weight, x, 'is_sparse', sparse, 'is_distributed', False,
             'remote_prefetch', False, 'padding_idx', padding_idx)
     else:
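Usage of the patched one_hot fast path through its public wrapper (values illustrative):

import paddle
import paddle.nn.functional as F

labels = paddle.to_tensor([0, 2, 1])
# Dygraph branch: _C_ops.one_hot_v2(x, 'depth', 3, 'allow_out_of_range', False)
print(F.one_hot(labels, num_classes=3).numpy())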
python/paddle/nn/functional/loss.py
@@ -38,6 +38,7 @@ from ...fluid.framework import in_dygraph_mode
 from ...fluid.framework import _varbase_creator
 from ...fluid.framework import Variable
 from paddle.utils import deprecated
+from paddle import _C_ops
 __all__ = []

@@ -115,15 +116,15 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
             reduction)
     if in_dygraph_mode():
-        out = core.ops.bce_loss(input, label)
+        out = _C_ops.bce_loss(input, label)
         if weight is not None:
-            out = core.ops.elementwise_mul(out, weight, 'axis', -1)
+            out = _C_ops.elementwise_mul(out, weight, 'axis', -1)
         if reduction == 'sum':
-            return core.ops.reduce_sum(out, 'dim', [0], 'keep_dim', False,
+            return _C_ops.reduce_sum(out, 'dim', [0], 'keep_dim', False,
                                        "reduce_all", True)
         elif reduction == 'mean':
-            return core.ops.mean(out)
+            return _C_ops.mean(out)
         else:
             return out

@@ -250,22 +251,23 @@ def binary_cross_entropy_with_logits(logit,
     if in_dygraph_mode():
         one = _varbase_creator(dtype=logit.dtype)
-        core.ops.fill_constant(one, 'value', float(1.0), 'force_cpu', False,
-                               'dtype', one.dtype, 'str_value', '1.0',
-                               'shape', [1])
-        out = core.ops.sigmoid_cross_entropy_with_logits(logit, label)
+        _C_ops.fill_constant(one, 'value', float(1.0), 'force_cpu', False,
+                             'dtype', one.dtype, 'str_value', '1.0',
+                             'shape', [1])
+        out = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
         if pos_weight is not None:
-            log_weight = core.ops.elementwise_add(
-                core.ops.elementwise_mul(
-                    label, core.ops.elementwise_sub(pos_weight, one)), one)
-            out = core.ops.elementwise_mul(out, log_weight)
+            log_weight = _C_ops.elementwise_add(
+                _C_ops.elementwise_mul(label,
+                                       _C_ops.elementwise_sub(pos_weight, one)),
+                one)
+            out = _C_ops.elementwise_mul(out, log_weight)
         if weight is not None:
-            out = core.ops.elementwise_mul(out, weight)
+            out = _C_ops.elementwise_mul(out, weight)
         if reduction == "sum":
-            return core.ops.reduce_sum(out, 'reduce_all', True)
+            return _C_ops.reduce_sum(out, 'reduce_all', True)
         elif reduction == "mean":
-            return core.ops.mean(out)
+            return _C_ops.mean(out)
         else:
             return out

@@ -393,7 +395,7 @@ def hsigmoid_loss(input,
     """
     if in_dygraph_mode():
-        out, _, _ = core.ops.hierarchical_sigmoid(
+        out, _, _ = _C_ops.hierarchical_sigmoid(
             input, weight, label, path_table, path_code, bias, 'num_classes',
             num_classes, 'is_sparse', is_sparse, 'remote_prefetch', is_sparse)
         return out

@@ -570,16 +572,16 @@ def margin_ranking_loss(input,
             "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but "
             "received %s, which is not allowed." % reduction)
     if fluid.framework.in_dygraph_mode():
-        out = core.ops.elementwise_sub(other, input)
-        out = core.ops.elementwise_mul(out, label)
+        out = _C_ops.elementwise_sub(other, input)
+        out = _C_ops.elementwise_mul(out, label)
         if margin != 0.0:
             margin = fluid.dygraph.base.to_variable([margin], dtype=out.dtype)
-            out = core.ops.elementwise_add(out, margin)
-        out = core.ops.relu(out)
+            out = _C_ops.elementwise_add(out, margin)
+        out = _C_ops.relu(out)
         if reduction == 'sum':
-            return core.ops.reduce_sum(out, 'reduce_all', True)
+            return _C_ops.reduce_sum(out, 'reduce_all', True)
         elif reduction == 'mean':
-            return core.ops.mean(out)
+            return _C_ops.mean(out)
         return out
     helper = LayerHelper("margin_ranking_loss", **locals())

@@ -690,9 +692,9 @@ def l1_loss(input, label, reduction='mean', name=None):
         unreduced = _elementwise_op_in_dygraph(
             input, label, axis=-1, act='abs', op_name='elementwise_sub')
         if reduction == 'mean':
-            return core.ops.mean(unreduced)
+            return _C_ops.mean(unreduced)
         elif reduction == 'sum':
-            return core.ops.reduce_sum(unreduced, 'dim', [0], 'keep_dim', False,
+            return _C_ops.reduce_sum(unreduced, 'dim', [0], 'keep_dim', False,
                                        'reduce_all', True)
         else:
             return unreduced

@@ -778,14 +780,14 @@ def nll_loss(input,
     c = input_shape[1]
     if in_dygraph_mode():
         if input_dims != 2 and input_dims != 4:
-            input, _ = core.ops.reshape2(input, None, 'shape', [n, c, 1, -1])
-            label, _ = core.ops.reshape2(label, None, 'shape', [n, 1, -1])
+            input, _ = _C_ops.reshape2(input, None, 'shape', [n, c, 1, -1])
+            label, _ = _C_ops.reshape2(label, None, 'shape', [n, 1, -1])
             out_shape = [n] + input_shape[2:]
-        out, total_weight = core.ops.nll_loss(input, label, weight,
+        out, total_weight = _C_ops.nll_loss(input, label, weight,
                                               'ignore_index', ignore_index,
                                               'reduction', reduction)
         if input_dims != 2 and input_dims != 4 and reduction == 'none':
-            out, _ = core.ops.reshape2(out, None, 'shape', out_shape)
+            out, _ = _C_ops.reshape2(out, None, 'shape', out_shape)
         return out
     helper = LayerHelper('nll_loss', **locals())

@@ -903,7 +905,7 @@ def kl_div(input, label, reduction='mean', name=None):
         label = fluid.layers.cast(label, 'float64')
     if paddle.in_dynamic_mode():
-        out = core.ops.kldiv_loss(input, label, 'reduction', reduction)
+        out = _C_ops.kldiv_loss(input, label, 'reduction', reduction)
         return out
     helper = LayerHelper('kl_div', **locals())

@@ -1386,7 +1388,7 @@ def cross_entropy(input,
         if input_dims - 1 == label_dims:
             label = paddle.unsqueeze(label, axis=axis)
         if in_dygraph_mode():
-            _, out = core.ops.softmax_with_cross_entropy(
+            _, out = _C_ops.softmax_with_cross_entropy(
                 input, label, 'soft_label', soft_label, 'ignore_index',
                 ignore_index, 'numeric_stable_mode', True, 'axis', axis,
                 'use_softmax', use_softmax)

@@ -1408,7 +1410,7 @@ def cross_entropy(input,
                 weight_gather_reshape = reshape(weight_gather, shape=out_shape)
                 out = paddle.cast(out, weight_gather_reshape.dtype)
-                out = core.ops.elementwise_mul(out, weight_gather_reshape)
+                out = _C_ops.elementwise_mul(out, weight_gather_reshape)
             else:
                 label_min = paddle.min(label)

@@ -1418,18 +1420,18 @@ def cross_entropy(input,
                         'Expected 0 <= label_value < class_dimension({}), but got {} <= label_value <= {} '.
                         format(input.shape[-1], label_min.numpy(), label_max.numpy()))
-                weight_gather = core.ops.gather_nd(weight, label)
+                weight_gather = _C_ops.gather_nd(weight, label)
                 input_shape = list(label.shape)
                 weight_gather_reshape = reshape(weight_gather, shape=input_shape)
                 out = paddle.cast(out, weight_gather_reshape.dtype)
-                out = core.ops.elementwise_mul(out, weight_gather_reshape)
+                out = _C_ops.elementwise_mul(out, weight_gather_reshape)
             if reduction == "sum":
                 # because of fluid_softmax_with_cross_entropy op's inner logic,
                 # in the out tensor of this op, the loss of sample with class_index==ignore_index is 0
                 # so, reduce_sum all directly is ok
-                return core.ops.reduce_sum(out, 'reduce_all', True)
+                return _C_ops.reduce_sum(out, 'reduce_all', True)
             elif reduction == "mean":
                 #1. if weight==none,
                 # numerator: reduce_sum all loss directly is ok causeof fluid_softmax_with_cross_entropy's inner logic

@@ -1438,30 +1440,30 @@ def cross_entropy(input,
                 # numerator: loss's weighted sum
                 # denominator: cal the sum of weight where the sample's class_index!=ignore_index
                 if ignore_index != -100:
-                    out_sum = core.ops.reduce_sum(out, 'reduce_all', True)
+                    out_sum = _C_ops.reduce_sum(out, 'reduce_all', True)
                     #for each label[i],set 1 or 0, according to ignore_index
                     #mask[i]=0, if label[i]==ignore_index
                     #mask[i]=1, otherwise
                     mask = (label != ignore_index)
                     if weight is None:
                         mask = paddle.cast(mask, dtype=out_sum.dtype)
-                        count = core.ops.reduce_sum(mask, 'reduce_all', True)
+                        count = _C_ops.reduce_sum(mask, 'reduce_all', True)
                         ret = out_sum / (count + (count == 0.0))
                     else:
                         mask = paddle.cast(mask, weight_gather_reshape.dtype)
-                        weight_ignored = core.ops.elementwise_mul(
+                        weight_ignored = _C_ops.elementwise_mul(
                             mask, weight_gather_reshape)
-                        weight_sum = core.ops.reduce_sum(weight_ignored,
-                                                         'reduce_all', True)
+                        weight_sum = _C_ops.reduce_sum(weight_ignored,
+                                                       'reduce_all', True)
                         ret = out_sum / (weight_sum + (weight_sum == 0.0))
                     return ret
                 elif weight is not None:
-                    out_sum = core.ops.reduce_sum(out, 'reduce_all', True)
-                    total_weight = core.ops.reduce_sum(weight_gather_reshape,
-                                                       'reduce_all', True)
+                    out_sum = _C_ops.reduce_sum(out, 'reduce_all', True)
+                    total_weight = _C_ops.reduce_sum(weight_gather_reshape,
+                                                     'reduce_all', True)
                     return out_sum / (total_weight + (total_weight == 0.0))
                 else:
-                    return core.ops.mean(out)
+                    return _C_ops.mean(out)
             else:
                 if input_dims - 1 == label_dims:

@@ -1645,38 +1647,37 @@ def sigmoid_focal_loss(logit,
     if in_dygraph_mode():
         one = _varbase_creator(dtype=logit.dtype)
-        core.ops.fill_constant(one, 'value', float(1.0), 'force_cpu', False,
-                               'dtype', one.dtype, 'str_value', '1.0',
-                               'shape', logit.shape)
-        loss = core.ops.sigmoid_cross_entropy_with_logits(logit, label)
-        pred = core.ops.sigmoid(logit)
-        p_t = core.ops.elementwise_add(
-            core.ops.elementwise_mul(pred, label),
-            core.ops.elementwise_mul(
-                core.ops.elementwise_sub(one, pred),
-                core.ops.elementwise_sub(one, label)))
+        _C_ops.fill_constant(one, 'value', float(1.0), 'force_cpu', False,
+                             'dtype', one.dtype, 'str_value', '1.0',
+                             'shape', logit.shape)
+        loss = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
+        pred = _C_ops.sigmoid(logit)
+        p_t = _C_ops.elementwise_add(
+            _C_ops.elementwise_mul(pred, label),
+            _C_ops.elementwise_mul(
+                _C_ops.elementwise_sub(one, pred),
+                _C_ops.elementwise_sub(one, label)))
         alpha = fluid.dygraph.base.to_variable([alpha], dtype=loss.dtype)
-        alpha_t = core.ops.elementwise_add(
-            core.ops.elementwise_mul(alpha, label),
-            core.ops.elementwise_mul(
-                core.ops.elementwise_sub(one, alpha),
-                core.ops.elementwise_sub(one, label)))
-        loss = core.ops.elementwise_mul(alpha_t, loss)
+        alpha_t = _C_ops.elementwise_add(
+            _C_ops.elementwise_mul(alpha, label),
+            _C_ops.elementwise_mul(
+                _C_ops.elementwise_sub(one, alpha),
+                _C_ops.elementwise_sub(one, label)))
+        loss = _C_ops.elementwise_mul(alpha_t, loss)
         gamma = fluid.dygraph.base.to_variable([gamma], dtype=loss.dtype)
-        gamma_t = core.ops.elementwise_pow(
-            core.ops.elementwise_sub(one, p_t), gamma)
-        loss = core.ops.elementwise_mul(gamma_t, loss)
+        gamma_t = _C_ops.elementwise_pow(
+            _C_ops.elementwise_sub(one, p_t), gamma)
+        loss = _C_ops.elementwise_mul(gamma_t, loss)
         if normalizer is not None:
-            loss = core.ops.elementwise_div(loss, normalizer)
+            loss = _C_ops.elementwise_div(loss, normalizer)
         if reduction == "sum":
-            return core.ops.reduce_sum(loss, 'reduce_all', True)
+            return _C_ops.reduce_sum(loss, 'reduce_all', True)
         elif reduction == "mean":
-            return core.ops.mean(loss)
+            return _C_ops.mean(loss)
         return loss
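The loss fast paths compose primitive ops rather than one fused kernel; for example binary_cross_entropy with reduction='mean' is bce_loss followed by mean. A sanity check against the textbook formula (values illustrative):

import paddle
import paddle.nn.functional as F

p = paddle.to_tensor([0.2, 0.7, 0.9])
t = paddle.to_tensor([0.0, 1.0, 1.0])
manual = paddle.mean(-(t * paddle.log(p) + (1 - t) * paddle.log(1 - p)))
out = F.binary_cross_entropy(p, t, reduction='mean')
print(paddle.allclose(out, manual).numpy())  # [ True]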
python/paddle/nn/functional/norm.py
@@ -23,6 +23,7 @@ from ...fluid.initializer import Constant
 from ...fluid.param_attr import ParamAttr
 from ...fluid import core, dygraph_utils
 import numbers
+from paddle import _C_ops
 __all__ = []

@@ -79,9 +80,9 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
     """
     if in_dygraph_mode():
         eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
-        out = core.ops.p_norm(x, 'axis', axis, 'porder',
-                              float(p), 'keepdim', True, 'epsilon', epsilon)
-        return x / core.ops.elementwise_max(out, eps)
+        out = _C_ops.p_norm(x, 'axis', axis, 'porder',
+                            float(p), 'keepdim', True, 'epsilon', epsilon)
+        return x / _C_ops.elementwise_max(out, eps)
     check_type(p, 'p', (float, int), 'normalize')
     check_type(axis, 'axis', (int), 'normalize')

@@ -185,7 +186,7 @@ def batch_norm(x,
                  not training, "data_layout", data_format, "use_mkldnn", False,
                  "fuse_with_relu", False, "use_global_stats", use_global_stats,
                  "trainable_statistics", trainable_statistics)
-        batch_norm_out, _, _, _, _, _ = core.ops.batch_norm(
+        batch_norm_out, _, _, _, _, _ = _C_ops.batch_norm(
             x, weight, bias, running_mean, running_var, mean_out, variance_out,
             *attrs)
         return dygraph_utils._append_activation_in_dygraph(

@@ -301,7 +302,7 @@ def layer_norm(x,
             [1:] + ', but got input shape ' + str(input_shape))
     if in_dygraph_mode():
-        pre_act, _, _ = core.ops.layer_norm(x, weight, bias, 'epsilon', epsilon,
+        pre_act, _, _ = _C_ops.layer_norm(x, weight, bias, 'epsilon', epsilon,
                                             'begin_norm_axis', begin_norm_axis)
         return dygraph_utils._append_activation_in_dygraph(pre_act, act=None)

@@ -385,7 +386,7 @@ def instance_norm(x,
     """
     if in_dygraph_mode():
-        out, _, _ = core.ops.instance_norm(x, weight, bias, "epsilon", eps,
+        out, _, _ = _C_ops.instance_norm(x, weight, bias, "epsilon", eps,
                                            "momentum", momentum, "data_format",
                                            data_format)
         return out
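normalize above is likewise two ops in dygraph: p_norm with keepdim=True, then division by the epsilon-clamped norm. Quick check (values illustrative):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[3.0, 4.0]])
# x / elementwise_max(p_norm(x), eps) along axis 1.
print(F.normalize(x, p=2, axis=1).numpy())  # [[0.6 0.8]]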
python/paddle/nn/functional/pooling.py
@@ -17,6 +17,8 @@ from ...fluid import core
 from ...fluid.framework import in_dygraph_mode
 from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
 from ...fluid.data_feeder import check_type, check_variable_and_dtype
+from paddle import _C_ops
+from paddle import _C_ops
 __all__ = []

@@ -216,7 +218,7 @@ def avg_pool1d(x,
     padding = _expand_low_nd_padding(padding)

     if in_dygraph_mode():
-        output = core.ops.pool2d(
+        output = _C_ops.pool2d(
             x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
             False, 'strides', stride, 'paddings', padding, 'padding_algorithm',
             padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,

@@ -327,12 +329,12 @@ def avg_pool2d(x,
         padding, 2, channel_last, ceil_mode=ceil_mode)

     if in_dygraph_mode():
-        output = core.ops.pool2d(
-            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
-            False, 'padding_algorithm', padding_algorithm, 'strides', stride,
-            'paddings', padding, 'use_cudnn', True, 'ceil_mode', ceil_mode,
-            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
-            data_format)
+        output = _C_ops.pool2d(
+            x, 'pooling_type', 'avg', 'ksize', kernel_size, 'global_pooling',
+            False, 'padding_algorithm', padding_algorithm, 'strides', stride,
+            'paddings', padding, 'use_cudnn', True, 'ceil_mode', ceil_mode,
+            'use_mkldnn', False, 'exclusive', exclusive, 'data_format',
+            data_format)
         if divisor_override is None:
             return output
         else:

@@ -446,7 +448,7 @@ def avg_pool3d(x,
         padding, 3, channel_last=channel_last, ceil_mode=ceil_mode)

     if in_dygraph_mode():
-        output = core.ops.pool3d(
+        output = _C_ops.pool3d(
             x, 'pooling_type', 'avg', 'ksize', kernel_size, 'strides', stride,
             'paddings', padding, 'global_pooling', False, 'padding_algorithm',
             padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,

@@ -566,7 +568,7 @@ def max_pool1d(x,
     if in_dygraph_mode():
         if return_mask:
-            pool_out = core.ops.max_pool2d_with_index(
+            pool_out = _C_ops.max_pool2d_with_index(
                 x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                 stride, 'paddings', padding, 'padding_algorithm',
                 padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,

@@ -576,7 +578,7 @@ def max_pool1d(x,
                     squeeze(pool_out[1], [2])) if return_mask else squeeze(
                         pool_out[0], [2])
         else:
-            pool_out = core.ops.pool2d(
+            pool_out = _C_ops.pool2d(
                 x, 'pooling_type', 'max', 'ksize', kernel_size,
                 'global_pooling', False, 'padding_algorithm', padding_algorithm,
                 'strides', stride, 'paddings', padding, 'use_cudnn', True,

@@ -704,7 +706,7 @@ def max_pool2d(x,
     if in_dygraph_mode():
         if return_mask:
-            output = core.ops.max_pool2d_with_index(
+            output = _C_ops.max_pool2d_with_index(
                 x, 'ksize', kernel_size, 'global_pooling', False, 'strides',
                 stride, 'paddings', padding, 'padding_algorithm',
                 padding_algorithm, 'use_cudnn', True, 'ceil_mode', ceil_mode,

@@ -712,7 +714,7 @@ def max_pool2d(x,
                 data_format)
             return output if return_mask else output[0]
         else:
-            output = core.ops.pool2d(
+            output = _C_ops.pool2d(
                 x, 'pooling_type', 'max', 'ksize', kernel_size,
                 'global_pooling', False, 'padding_algorithm', padding_algorithm,
                 'strides', stride, 'paddings', padding, 'use_cudnn', True,

@@ -835,7 +837,7 @@ def max_pool3d(x,
     if in_dygraph_mode():
         if return_mask:
-            output = core.ops.max_pool3d_with_index(
+            output = _C_ops.max_pool3d_with_index(
                 x, 'pooling_type', 'max', 'ksize', kernel_size, 'strides',
                 stride, 'paddings', padding, 'global_pooling', False,
                 'padding_algorithm', padding_algorithm, 'use_cudnn', True,

@@ -843,7 +845,7 @@ def max_pool3d(x,
                 'data_format', data_format)
             return output if return_mask else output[0]
         else:
-            output = core.ops.pool3d(
+            output = _C_ops.pool3d(
                 x, 'pooling_type', 'max', 'ksize', kernel_size,
                 'global_pooling', False, 'padding_algorithm', padding_algorithm,
                 'strides', stride, 'paddings', padding, 'use_cudnn', True,

@@ -932,7 +934,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
-        pool_out = core.ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
+        pool_out = _C_ops.pool2d(x, 'pooling_type', pool_type, 'ksize',
                                    pool_size, 'adaptive', True)
         return squeeze(pool_out, [2])

@@ -1031,7 +1033,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
             output_size[1] = in_w

     if in_dygraph_mode():
-        output = core.ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
+        output = _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', output_size,
                                  'global_pooling', False, 'adaptive', True,
                                  'data_format', data_format)
         return output

@@ -1137,7 +1139,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
             output_size[2] = in_w

     if in_dygraph_mode():
-        output = core.ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
+        output = _C_ops.pool3d(x, 'pooling_type', 'avg', 'ksize', output_size,
                                  'global_pooling', False, 'adaptive', True,
                                  'data_format', data_format)
         return output

@@ -1221,7 +1223,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
     x = unsqueeze(x, [2])
     if in_dygraph_mode():
-        pool_out = core.ops.max_pool2d_with_index(
+        pool_out = _C_ops.max_pool2d_with_index(
             x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True)
         return (squeeze(pool_out[0], [2]), squeeze(
             pool_out[1], [2])) if return_mask else squeeze(pool_out[0], [2])

@@ -1310,7 +1312,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
             output_size[1] = in_w
     if in_dygraph_mode():
-        pool_out = core.ops.max_pool2d_with_index(
+        pool_out = _C_ops.max_pool2d_with_index(
             x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True)
         return pool_out if return_mask else pool_out[0]

@@ -1403,7 +1405,7 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
             output_size[2] = in_w
     if in_dygraph_mode():
-        pool_out = core.ops.max_pool3d_with_index(
+        pool_out = _C_ops.max_pool3d_with_index(
             x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True)
         return pool_out if return_mask else pool_out[0]
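Each pooling fast path hands the whole attribute list to a single _C_ops.pool2d/pool3d/max_pool*_with_index call as flat name/value pairs. Public usage (shapes illustrative):

import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 1, 4, 4])
# Dygraph branch: _C_ops.pool2d(x, 'pooling_type', 'avg', 'ksize', ...).
y = F.avg_pool2d(x, kernel_size=2, stride=2)
print(y.shape)  # [1, 1, 2, 2]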
python/paddle/nn/functional/vision.py
@@ -18,6 +18,7 @@ from ...fluid.layer_helper import LayerHelper
 from ...fluid.data_feeder import check_variable_and_dtype
 from ...fluid import dygraph_utils
 import numpy as np
+from paddle import _C_ops
 __all__ = []

@@ -91,7 +92,7 @@ def affine_grid(theta, out_shape, align_corners=True, name=None):
     if in_dygraph_mode():
         _out_shape = out_shape.numpy().tolist() if isinstance(
             out_shape, Variable) else out_shape
-        return core.ops.affine_grid(theta, "output_shape", _out_shape,
+        return _C_ops.affine_grid(theta, "output_shape", _out_shape,
                                     "align_corners", align_corners, "use_cudnn",
                                     use_cudnn)

@@ -272,7 +273,7 @@ def grid_sample(x,
     if in_dygraph_mode():
         attrs = ('mode', mode, 'padding_mode', padding_mode, 'align_corners',
                  align_corners, 'use_cudnn', use_cudnn)
-        out = getattr(core.ops, 'grid_sampler')(x, grid, *attrs)
+        out = getattr(_C_ops, 'grid_sampler')(x, grid, *attrs)
     else:
         helper = LayerHelper("grid_sample", **locals())
         check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sample')

@@ -328,7 +329,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
             data_format))
     if in_dygraph_mode():
-        return core.ops.pixel_shuffle(x, "upscale_factor", upscale_factor,
+        return _C_ops.pixel_shuffle(x, "upscale_factor", upscale_factor,
                                       "data_format", data_format)
     helper = LayerHelper("pixel_shuffle", **locals())
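Public usage of the patched pixel_shuffle path (shapes illustrative):

import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 9, 4, 4])
# Dygraph branch: _C_ops.pixel_shuffle(x, "upscale_factor", 3, "data_format", "NCHW").
print(F.pixel_shuffle(x, upscale_factor=3).shape)  # [1, 1, 12, 12]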
python/paddle/nn/layer/distance.py
@@ -19,6 +19,7 @@ from ...fluid.dygraph import layers
 from ...fluid.framework import core, in_dygraph_mode
 from ...fluid.data_feeder import check_variable_and_dtype, check_type
 from ...fluid.layer_helper import LayerHelper
+from paddle import _C_ops
 __all__ = []

@@ -78,8 +79,8 @@ class PairwiseDistance(layers.Layer):
     def forward(self, x, y):
         if in_dygraph_mode():
-            sub = core.ops.elementwise_sub(x, y)
-            return core.ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim',
+            sub = _C_ops.elementwise_sub(x, y)
+            return _C_ops.p_norm(sub, 'axis', 1, 'porder', self.p, 'keepdim',
                                    self.keepdim, 'epsilon', self.epsilon)
         check_variable_and_dtype(x, 'x', ['float32', 'float64'],
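PairwiseDistance.forward is two ops as well: elementwise_sub, then p_norm along axis 1. Check on a 3-4-5 triangle (values illustrative):

import paddle

x = paddle.to_tensor([[1.0, 3.0]])
y = paddle.to_tensor([[4.0, 7.0]])
dist = paddle.nn.PairwiseDistance(p=2.0)(x, y)
print(dist.numpy())  # [5.]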
python/paddle/nn/layer/norm.py
@@ -49,6 +49,7 @@ import numbers
 import warnings
 from ...fluid.dygraph.base import no_grad
 from .. import functional as F
+from paddle import _C_ops
 __all__ = []

@@ -1083,7 +1084,7 @@ class SyncBatchNorm(_BatchNormBase):
                      self._data_format, "use_mkldnn", False, "fuse_with_relu",
                      False, "use_global_stats", False, 'trainable_statistics',
                      False)
-            sync_batch_norm_out, _, _, _, _, _ = core.ops.sync_batch_norm(
+            sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm(
                 x, self.weight, self.bias, self._mean, self._variance, mean_out,
                 variance_out, *attrs)
python/paddle/nn/layer/rnn.py
@@ -32,7 +32,7 @@ from paddle.fluid.dygraph import Layer, LayerList
 from paddle.fluid.layers import utils
 from paddle.fluid.layers.utils import map_structure, flatten, pack_sequence_as
 from paddle.fluid.data_feeder import convert_dtype
+from paddle import _C_ops
 __all__ = []

@@ -981,7 +981,7 @@ class RNNBase(LayerList):
             inputs = paddle.tensor.transpose(inputs, [1, 0, 2])
         if fluid.framework.in_dygraph_mode():
-            _, _, out, state = framework.core.ops.rnn(
+            _, _, out, state = _C_ops.rnn(
                 inputs, initial_states, self._all_weights, sequence_length,
                 self._dropout_state, self.state_components, 'dropout_prob',
                 self.dropout, 'is_bidirec', self.num_directions == 2,
python/paddle/nn/quant/quant_layers.py
@@ -24,6 +24,7 @@ from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.nn import functional as F
 import logging
 from paddle.fluid.log_helper import get_logger
+from paddle import _C_ops
 __all__ = [
     'FakeQuantAbsMax',

@@ -91,8 +92,8 @@ class FakeQuantAbsMax(layers.Layer):
                 dtype=self._dtype,
                 persistable=False)
             out_scale.stop_gradient = True
-            out, _, = core.ops.fake_quantize_dequantize_abs_max(
-                input, quant_out, out_scale, *attrs)
+            out, _, = _C_ops.fake_quantize_dequantize_abs_max(
+                input, quant_out, out_scale, *attrs)
             return out
         check_variable_and_dtype(input, 'input', ['float32'], "FakeQuantAbsMax")

@@ -185,7 +186,7 @@ class FakeQuantMovingAverageAbsMax(layers.Layer):
             state = self._state if self.training else None
             accum = self._accum if self.training else None
-            out, _, _, _ = core.ops.fake_quantize_dequantize_moving_average_abs_max(
+            out, _, _, _ = _C_ops.fake_quantize_dequantize_moving_average_abs_max(
                 input, self._scale, accum, state, quant_out, self._scale, state,
                 accum, *attrs)
             return out

@@ -271,7 +272,7 @@ class FakeQuantChannelWiseAbsMax(layers.Layer):
                 persistable=False)
             out_scale.stop_gradient = True
-            out, _, = core.ops.fake_channel_wise_quantize_dequantize_abs_max(
+            out, _, = _C_ops.fake_channel_wise_quantize_dequantize_abs_max(
                 input, quant_out, out_scale, *attrs)
             return out

@@ -355,7 +356,7 @@ class MovingAverageAbsMaxScale(layers.Layer):
                 dtype=input.dtype, persistable=False)
-            out, _, _, _ = core.ops.moving_average_abs_max_scale(
+            out, _, _, _ = _C_ops.moving_average_abs_max_scale(
                 input, accum, state, quant_out, self._scale, state, accum,
                 *attrs)
             return out
python/paddle/optimizer/adam.py
@@ -24,6 +24,7 @@ from ..fluid.dygraph import base as imperative_base
 from collections import defaultdict
 import paddle
+from paddle import _C_ops
 __all__ = []

@@ -316,7 +317,7 @@ class Adam(Optimizer):
         _beta1 = self._beta1 if not isinstance(
             self._beta1, Variable) else self._beta1.numpy().item(0)
         _beta2 = self._beta2 if not isinstance(
             self._beta2, Variable) else self._beta2.numpy().item(0)
-        _, _, _, _, _ = core.ops.adam(
+        _, _, _, _, _ = _C_ops.adam(
             param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
             beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1, moment2,
             beta1_pow_acc, beta2_pow_acc, 'epsilon', self._epsilon,
python/paddle/optimizer/lamb.py
@@ -16,6 +16,7 @@ from .optimizer import Optimizer
 from ..fluid import core
 from ..fluid import framework
 from ..fluid.framework import Variable
+from paddle import _C_ops
 __all__ = []

@@ -199,7 +200,7 @@ class Lamb(Optimizer):
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
-            _, _, _, _, _ = core.ops.lamb(
+            _, _, _, _, _ = _C_ops.lamb(
                 param_and_grad[0], param_and_grad[1], lr, moment1, moment2,
                 beta1_pow_acc, beta2_pow_acc, param_and_grad[0], moment1,
                 moment2, beta1_pow_acc, beta2_pow_acc, 'beta1', self._beta1,
python/paddle/optimizer/momentum.py
@@ -23,6 +23,7 @@ from ..fluid import unique_name
 from ..fluid import layers
 import paddle.fluid as fluid
 from paddle.fluid.regularizer import L2DecayRegularizer
+from paddle import _C_ops
 __all__ = []

@@ -293,7 +294,7 @@ class Momentum(Optimizer):
         if framework.in_dygraph_mode():
             if isinstance(param_and_grad, dict):
                 self._update_regularization(param_and_grad['weight_decay'])
-            _, _ = core.ops.momentum(
+            _, _ = _C_ops.momentum(
                 param_and_grad[0], param_and_grad[1], velocity_acc, lr,
                 param_and_grad[0], velocity_acc, 'mu', self._momentum,
                 'use_nesterov', self._use_nesterov, 'regularization_method',
python/paddle/optimizer/optimizer.py
@@ -41,6 +41,7 @@ from ..fluid.wrapped_decorator import signature_safe_contextmanager
 from .. import compat as cpt
 from .lr import LRScheduler
 import copy
+from paddle import _C_ops
 __all__ = []

@@ -916,7 +917,7 @@ class Optimizer(object):
         assert regularization_term is not None

         if framework.in_dygraph_mode():
-            return core.ops.sum([grad, regularization_term])
+            return _C_ops.sum([grad, regularization_term])

         new_grad = grad
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
python/paddle/optimizer/sgd.py
@@ -17,6 +17,7 @@ from ..fluid import core
 from ..fluid import framework
 from ..fluid.framework import Variable, name_scope
 from ..fluid.dygraph import no_grad
+from paddle import _C_ops
 __all__ = []

@@ -91,7 +92,7 @@ class SGD(Optimizer):
             param_and_grad = self._update_param_group(param_and_grad)
         lr = self._create_param_lr(param_and_grad)
         if framework.in_dygraph_mode():
-            core.ops.sgd(param_and_grad[0], lr, param_and_grad[1],
+            _C_ops.sgd(param_and_grad[0], lr, param_and_grad[1],
                          param_and_grad[0])
             return None
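Why the rename matters most in these optimizer paths: core.ops.<op> costs two attribute lookups per call, and an optimizer issues one op call per parameter per step. A rough micro-benchmark sketch (numbers machine-dependent; assumes a dygraph-enabled build where both spellings remain importable):

import timeit

import paddle
from paddle import _C_ops
from paddle.fluid import core

x = paddle.randn([32, 32])
# Two attribute hops per call: core -> ops -> relu.
t_old = timeit.timeit(lambda: core.ops.relu(x), number=10000)
# One cached lookup: _C_ops.relu.
t_new = timeit.timeit(lambda: _C_ops.relu(x), number=10000)
print(t_old, t_new)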
python/paddle/tensor/attribute.py
@@ -21,6 +21,7 @@ from ..fluid.data_feeder import check_variable_and_dtype
 # TODO: define functions to get tensor attributes
 from ..fluid.layers import rank  # noqa: F401
 from ..fluid.layers import shape  # noqa: F401
+from paddle import _C_ops
 __all__ = []

@@ -68,7 +69,7 @@ def real(x, name=None):
         #        [4., 5., 6.]])
     """
     if in_dygraph_mode():
-        return core.ops.real(x)
+        return _C_ops.real(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'real')
     helper = LayerHelper('real', **locals())

@@ -112,7 +113,7 @@ def imag(x, name=None):
         #        [3., 2., 1.]])
     """
     if in_dygraph_mode():
-        return core.ops.imag(x)
+        return _C_ops.imag(x)
     check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'imag')
     helper = LayerHelper('imag', **locals())
python/paddle/tensor/creation.py
@@ -30,6 +30,7 @@ from ..fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varb
 # TODO: define functions to get create a tensor
 from ..fluid.layers import linspace  # noqa: F401
 import paddle
+from paddle import _C_ops
 __all__ = []

@@ -207,7 +208,7 @@ def full_like(x, fill_value, dtype=None, name=None):
         dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        return core.ops.fill_any_like(x, 'value', fill_value, 'dtype', dtype)
+        return _C_ops.fill_any_like(x, 'value', fill_value, 'dtype', dtype)
     helper = LayerHelper("full_like", **locals())
     check_variable_and_dtype(

@@ -639,7 +640,7 @@ def tril(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
-        op = getattr(core.ops, 'tril_triu')
+        op = getattr(_C_ops, 'tril_triu')
         return op(x, 'diagonal', diagonal, "lower", True)
     return _tril_triu_op(LayerHelper('tril', **locals()))

@@ -706,7 +707,7 @@ def triu(x, diagonal=0, name=None):
     """
     if in_dygraph_mode():
-        op = getattr(core.ops, 'tril_triu')
+        op = getattr(_C_ops, 'tril_triu')
        return op(x, 'diagonal', diagonal, "lower", False)
     return _tril_triu_op(LayerHelper('triu', **locals()))

@@ -749,7 +750,7 @@ def meshgrid(*args, **kwargs):
         args = args[0]
     if in_dygraph_mode():
         num = len(args)
-        out = core.ops.meshgrid(list(args), num)
+        out = _C_ops.meshgrid(list(args), num)
         return out
     name = kwargs.get("name", None)

@@ -854,12 +855,12 @@ def diagflat(x, offset=0, name=None):
     padding_value = 0
     if in_dygraph_mode():
         if len(x.shape) == 1:
-            return core.ops.diag_v2(x, "offset", offset, "padding_value",
+            return _C_ops.diag_v2(x, "offset", offset, "padding_value",
                                     padding_value)
         else:
-            y, _ = core.ops.flatten_contiguous_range(x, "start_axis", 0,
+            y, _ = _C_ops.flatten_contiguous_range(x, "start_axis", 0,
                                                      "stop_axis", -1)
-            return core.ops.diag_v2(y, "offset", offset, "padding_value",
+            return _C_ops.diag_v2(y, "offset", offset, "padding_value",
                                     padding_value)
     check_type(x, 'x', (Variable), 'diagflat')

@@ -967,7 +968,7 @@ def diag(x, offset=0, padding_value=0, name=None):
         #  [4]
     """
     if in_dygraph_mode():
-        return core.ops.diag_v2(x, "offset", offset, "padding_value",
+        return _C_ops.diag_v2(x, "offset", offset, "padding_value",
                                 padding_value)
     check_type(x, 'x', (Variable), 'diag_v2')

@@ -1049,7 +1050,7 @@ def empty(shape, dtype=None, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        out = core.ops.empty('shape', shape, 'dtype',
+        out = _C_ops.empty('shape', shape, 'dtype',
                              convert_np_dtype_to_dtype_(dtype))
         out.stop_gradient = True
         return out

@@ -1116,7 +1117,7 @@ def empty_like(x, dtype=None, name=None):
     dtype = convert_dtype(dtype)
     if in_dygraph_mode():
-        out = core.ops.empty('shape', x.shape, 'dtype',
+        out = _C_ops.empty('shape', x.shape, 'dtype',
                              convert_np_dtype_to_dtype_(dtype))
         out.stop_gradient = True
         return out
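The diagflat branch shows the composition for >1-D input: flatten with flatten_contiguous_range, then build the diagonal with diag_v2. Public usage (values illustrative):

import paddle

x = paddle.to_tensor([[1, 2], [3, 4]])
# Flattened to [1, 2, 3, 4], then placed on the main diagonal via diag_v2.
print(paddle.diagflat(x).shape)  # [4, 4]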
python/paddle/tensor/linalg.py
@@ -20,6 +20,7 @@ from ..fluid.framework import in_dygraph_mode, _varbase_creator
 from ..fluid.layers import transpose  # noqa: F401
 from paddle.common_ops_import import core
 from paddle.common_ops_import import VarDesc
+from paddle import _C_ops
 __all__ = []

@@ -131,7 +132,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
     """
     op_type = 'matmul_v2'
     if in_dygraph_mode():
-        op = getattr(core.ops, op_type)
+        op = getattr(_C_ops, op_type)
         return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)

     attrs = {

@@ -244,10 +245,10 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 )
         if in_dygraph_mode():
             if dim is None:
-                return core.ops.frobenius_norm(input, 'keep_dim', keepdim,
+                return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
                                                'reduce_all', True)
-            return core.ops.frobenius_norm(input, 'dim', dim, 'keep_dim',
-                                           keepdim, 'reduce_all', False)
+            return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
+                                         'reduce_all', False)
         attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
         if dim is None:
             attrs['reduce_all'] = True

@@ -281,7 +282,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         """
         if in_dygraph_mode():
             if axis is None: axis = -1
-            return core.ops.p_norm(input, 'porder', porder, 'axis', axis,
+            return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
                                    'keepdim', keepdim, 'asvector', asvector)
         if porder is not None:
             check_type(porder, 'porder', (float, int), 'p_norm')

@@ -576,7 +577,7 @@ def dot(x, y, name=None):
     op_type = 'dot'
     # skip var type check in dygraph mode to improve efficiency
     if in_dygraph_mode():
-        op = getattr(core.ops, op_type)
+        op = getattr(_C_ops, op_type)
         return op(x, y)
     assert x is not None, 'x cannot be None in {}'.format(op_type)

@@ -651,7 +652,7 @@ def t(input, name=None):
             return input
         # 2-D tensor
         perm = [1, 0]
-        out, _ = core.ops.transpose2(input, 'axis', perm)
+        out, _ = _C_ops.transpose2(input, 'axis', perm)
         return out
     check_variable_and_dtype(

@@ -713,9 +714,9 @@ def cross(x, y, axis=None, name=None):
     """
     if in_dygraph_mode():
         if axis is not None:
-            return core.ops.cross(x, y, 'dim', axis)
+            return _C_ops.cross(x, y, 'dim', axis)
         else:
-            return core.ops.cross(x, y)
+            return _C_ops.cross(x, y)
     helper = LayerHelper("cross", **locals())
     out = helper.create_variable_for_type_inference(x.dtype)

@@ -771,7 +772,7 @@ def cholesky(x, upper=False, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.cholesky(x, "upper", upper)
+        return _C_ops.cholesky(x, "upper", upper)
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
     check_type(upper, 'upper', bool, 'cholesky')
     helper = LayerHelper('cholesky', **locals())

@@ -834,7 +835,7 @@ def bmm(x, y, name=None):
             format(x_shape, y_shape))
     if in_dygraph_mode():
-        return core.ops.bmm(x, y)
+        return _C_ops.bmm(x, y)
     helper = LayerHelper('bmm', **locals())
     out = helper.create_variable_for_type_inference(dtype=x.dtype)

@@ -867,7 +868,7 @@ def histogram(input, bins=100, min=0, max=0):
             print(result) # [0, 2, 1, 0]
     """
     if in_dygraph_mode():
-        return core.ops.histogram(input, "bins", bins, "min", min, "max", max)
+        return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)
     helper = LayerHelper('histogram', **locals())
     check_variable_and_dtype(

@@ -914,7 +915,7 @@ def mv(x, vec, name=None):
             out = paddle.mv(x, vec)
     """
     if in_dygraph_mode():
-        out = core.ops.mv(x, vec)
+        out = _C_ops.mv(x, vec)
         return out

     def __check_input(x, vec):
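t() above reduces to a single transpose2 with perm=[1, 0] for 2-D input, while matmul and dot pick their op with getattr(_C_ops, op_type). Example (shapes illustrative):

import paddle

x = paddle.randn([2, 3])
# Dygraph branch: _C_ops.transpose2(input, 'axis', [1, 0]).
print(paddle.t(x).shape)  # [3, 2]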
python/paddle/tensor/logic.py
@@ -27,6 +27,7 @@ from ..fluid.layers import logical_or  # noqa: F401
 from ..fluid.layers import logical_xor  # noqa: F401
 from paddle.common_ops_import import core
+from paddle import _C_ops
 __all__ = []

@@ -60,7 +61,7 @@ def equal_all(x, y, name=None):
           print(result2) # result2 = [False ]
     """
     if in_dygraph_mode():
-        return core.ops.equal_all(x, y)
+        return _C_ops.equal_all(x, y)
     helper = LayerHelper("equal_all", **locals())
     out = helper.create_variable_for_type_inference(dtype='bool')

@@ -123,7 +124,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.allclose(x, y, 'rtol',
+        return _C_ops.allclose(x, y, 'rtol',
                                  str(rtol), 'atol',
                                  str(atol), 'equal_nan', equal_nan)

@@ -174,7 +175,7 @@ def equal(x, y, name=None):
           print(result1) # result1 = [True False False]
     """
     if in_dygraph_mode():
-        return core.ops.equal(x, y)
+        return _C_ops.equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "equal")

@@ -216,7 +217,7 @@ def greater_equal(x, y, name=None):
           print(result1) # result1 = [True False True]
     """
     if in_dygraph_mode():
-        return core.ops.greater_equal(x, y)
+        return _C_ops.greater_equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"],

@@ -262,7 +263,7 @@ def greater_than(x, y, name=None):
          print(result1) # result1 = [False False True]
     """
     if in_dygraph_mode():
-        return core.ops.greater_than(x, y)
+        return _C_ops.greater_than(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"],

@@ -309,7 +310,7 @@ def less_equal(x, y, name=None):
           print(result1) # result1 = [True True False]
     """
     if in_dygraph_mode():
-        return core.ops.less_equal(x, y)
+        return _C_ops.less_equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "less_equal")

@@ -352,7 +353,7 @@ def less_than(x, y, name=None):
           print(result1) # result1 = [False True False]
     """
     if in_dygraph_mode():
-        return core.ops.less_than(x, y)
+        return _C_ops.less_than(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "less_than")

@@ -395,7 +396,7 @@ def not_equal(x, y, name=None):
           print(result1) # result1 = [False True True]
     """
     if in_dygraph_mode():
-        return core.ops.not_equal(x, y)
+        return _C_ops.not_equal(x, y)
     check_variable_and_dtype(
         x, "x", ["bool", "float32", "float64", "int32", "int64"], "not_equal")

@@ -441,7 +442,7 @@ def is_tensor(x):
 def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
     if in_dygraph_mode():
-        op = getattr(core.ops, op_name)
+        op = getattr(_C_ops, op_name)
         if binary_op:
             return op(x, y)
         else:
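The comparison ops are single-call fast paths, and _bitwise_op keeps the getattr dispatch so one helper serves bitwise_and/bitwise_or/bitwise_xor/bitwise_not. Example of the patched equal_all (values illustrative):

import paddle

a = paddle.to_tensor([1, 2, 3])
b = paddle.to_tensor([1, 2, 3])
# Dygraph branch: _C_ops.equal_all(x, y).
print(paddle.equal_all(a, b).numpy())  # [ True]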
python/paddle/tensor/manipulation.py
...
...
@@ -32,6 +32,7 @@ from ..fluid.layers import shard_index  # noqa: F401
 from ..fluid import layers
 from ..fluid.dygraph.inplace_utils import inplace_apis_in_dygraph_only
 import paddle
+from paddle import _C_ops

 __all__ = []
...
...
@@ -151,7 +152,7 @@ def broadcast_tensors(input, name=None):
     num_inputs = len(input)
     if in_dygraph_mode():
-        return core.ops.broadcast_tensors(input, num_inputs)
+        return _C_ops.broadcast_tensors(input, num_inputs)

     check_type(input, 'input', (list, tuple), 'broadcast_tensors')
     if num_inputs < 1:
...
...
@@ -361,8 +362,8 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None):
         raise ValueError("The stop_axis should be larger than stat_axis")

     if in_dygraph_mode():
-        dy_out, _ = core.ops.flatten_contiguous_range(
-            x, 'start_axis', start_axis, 'stop_axis', stop_axis)
+        dy_out, _ = _C_ops.flatten_contiguous_range(x, 'start_axis', start_axis,
+                                                    'stop_axis', stop_axis)
         return dy_out

     helper = LayerHelper('flatten', **locals())
...
...
@@ -403,7 +404,7 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None):
     if start_axis > stop_axis:
         raise ValueError("The stop_axis should be larger than stat_axis")

-    dy_out, _ = core.ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
+    dy_out, _ = _C_ops.flatten_contiguous_range_(x, 'start_axis', start_axis,
                                                  'stop_axis', stop_axis)
     return dy_out
...
...
@@ -460,7 +461,7 @@ def roll(x, shifts, axis=None, name=None):
         axis = []

     if in_dygraph_mode():
-        return core.ops.roll(x, 'axis', axis, 'shifts', shifts)
+        return _C_ops.roll(x, 'axis', axis, 'shifts', shifts)

     helper = LayerHelper("roll", **locals())
     check_type(axis, 'axis', (list, tuple), 'roll')
...
...
@@ -705,7 +706,7 @@ def squeeze_(x, axis=None, name=None):
     elif isinstance(axis, tuple):
         axis = list(axis)

-    out, _ = core.ops.squeeze2_(x, 'axes', axis)
+    out, _ = _C_ops.squeeze2_(x, 'axes', axis)
     return out
...
...
@@ -766,7 +767,7 @@ def unique(x,
         axis = [axis]
     attr_dtype = convert_np_dtype_to_dtype_(dtype)
     if in_dygraph_mode():
-        out, inverse, indices, counts = core.ops.unique(
+        out, inverse, indices, counts = _C_ops.unique(
             x, 'dtype', attr_dtype, 'return_index', return_index,
             'return_inverse', return_inverse, 'return_counts', return_counts,
             'axis', axis, "is_sorted", True)
...
...
@@ -897,7 +898,7 @@ def unsqueeze_(x, axis, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in axis
         ]
-    out, _ = core.ops.unsqueeze2_(x, 'axes', axis)
+    out, _ = _C_ops.unsqueeze2_(x, 'axes', axis)
     return out
...
...
@@ -951,7 +952,7 @@ def gather(x, index, axis=None, name=None):
     if in_dygraph_mode():
         axis = axis.item() if isinstance(axis, paddle.Tensor) else axis
-        return core.ops.gather(x, index, None, "axis", axis, "overwrite", False)
+        return _C_ops.gather(x, index, None, "axis", axis, "overwrite", False)

     check_variable_and_dtype(
         x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'],
...
...
@@ -1024,7 +1025,7 @@ def unbind(input, axis=0):
     axis_ = axis if axis >= 0 else len(input_shape) + axis
     num = input_shape[axis_]
     if in_dygraph_mode():
-        return core.ops.unbind(input, num, 'axis', axis)
+        return _C_ops.unbind(input, num, 'axis', axis)

     helper = LayerHelper("unbind", **locals())
     check_type(input, 'input', (Variable), 'unbind')
...
...
@@ -1116,7 +1117,7 @@ def scatter(x, index, updates, overwrite=True, name=None):
             #  [1., 1.]]
     """
     if in_dygraph_mode():
-        return core.ops.scatter(x, index, updates, 'overwrite', overwrite)
+        return _C_ops.scatter(x, index, updates, 'overwrite', overwrite)

     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'scatter')
     check_type(overwrite, 'overwrite', bool, 'scatter')
...
...
@@ -1138,7 +1139,7 @@ def scatter_(x, index, updates, overwrite=True, name=None):
     Inplace version of ``scatter`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_paddle_tensor_scatter`.
     """
-    return core.ops.scatter_(x, index, updates, 'overwrite', overwrite)
+    return _C_ops.scatter_(x, index, updates, 'overwrite', overwrite)

 def scatter_nd_add(x, index, updates, name=None):
...
...
@@ -1293,7 +1294,7 @@ def tile(x, repeat_times, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.tile(x, 'repeat_times', repeat_times)
+        return _C_ops.tile(x, 'repeat_times', repeat_times)

     check_type(repeat_times, 'repeat_times', (list, tuple, Variable), 'tile')
     if isinstance(repeat_times, Variable):
         assert len(repeat_times.shape) == 1, (
...
...
@@ -1376,7 +1377,7 @@ def expand_as(x, y, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_as_v2(x, 'target_shape', y.shape)
+        return _C_ops.expand_as_v2(x, 'target_shape', y.shape)

     check_variable_and_dtype(
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'expand_as')
...
...
@@ -1430,7 +1431,7 @@ def broadcast_to(x, shape, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_v2(x, 'shape', shape)
+        return _C_ops.expand_v2(x, 'shape', shape)

     if isinstance(shape, Variable):
         assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
...
...
@@ -1517,7 +1518,7 @@ def expand(x, shape, name=None):
             # [[1, 2, 3], [1, 2, 3]]
     """
     if in_dygraph_mode():
-        return core.ops.expand_v2(x, 'shape', shape)
+        return _C_ops.expand_v2(x, 'shape', shape)

     if isinstance(shape, Variable):
         assert len(shape.shape) == 1, ('shape must be an 1-D Tensor.')
...
...
@@ -1663,11 +1664,11 @@ def reshape_(x, shape, name=None):
             item.numpy().item(0) if isinstance(item, Variable) else item
             for item in shape
         ]
-        out, _ = core.ops.reshape2_(x, None, 'shape', shape)
+        out, _ = _C_ops.reshape2_(x, None, 'shape', shape)
         return out
     elif isinstance(shape, Variable):
         shape.stop_gradient = True
-        out, _ = core.ops.reshape2_(x, shape)
+        out, _ = _C_ops.reshape2_(x, shape)
         return out
...
...
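The squeeze_, unsqueeze_, and reshape_ hunks call the trailing-underscore
kernels (squeeze2_, reshape2_), which mutate x and return an (out, xshape)
pair whose second element is discarded. A hedged usage sketch through the
public inplace API (dygraph assumed, since these APIs are restricted by the
inplace_apis_in_dygraph_only decorator imported above):

    import paddle

    x = paddle.rand([2, 1, 3])
    y = x.squeeze_(1)   # backed by _C_ops.squeeze2_(x, 'axes', [1])
    print(y.shape)      # [2, 3]
    print(x.shape)      # [2, 3]; x itself was modified in place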
python/paddle/tensor/math.py  View file @ f05098b5
...
...
@@ -67,6 +67,7 @@ from ..fluid.layers import lgamma  # noqa: F401
 from ..fluid.layers import multiplex  # noqa: F401
 from ..fluid import layers
+from paddle import _C_ops

 __all__ = []
...
...
@@ -91,7 +92,7 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
     Please refer to :ref:`api_tensor_scale`.
     """
     _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
-    return core.ops.scale_(x, 'scale',
+    return _C_ops.scale_(x, 'scale',
                          float(_scale), 'bias',
                          float(bias), 'bias_after_scale', bias_after_scale)
...
...
@@ -144,7 +145,7 @@ def pow(x, y, name=None):
     # in dynamic graph mode
     if in_dygraph_mode():
         if isinstance(y, (int, float)):
-            return core.ops.pow(x, 'factor', y)
+            return _C_ops.pow(x, 'factor', y)
         elif isinstance(y, (paddle.Tensor, Variable)):
             return _elementwise_op_in_dygraph(
                 x, y, axis=-1, act=None, op_name='elementwise_pow')
...
...
@@ -177,7 +178,7 @@ def _elementwise_op_in_dygraph(x,
                                act=None,
                                use_mkldnn=False,
                                op_name=None):
-    op = getattr(core.ops, op_name)
+    op = getattr(_C_ops, op_name)
     out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)

     return dygraph_utils._append_activation_in_dygraph(
...
...
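These cached ops take operator attributes as a flat trailing list of
name/value pairs rather than Python keyword arguments. A hedged sketch of the
convention, mirroring the elementwise call above (dygraph mode and this
commit assumed):

    import paddle
    from paddle import _C_ops

    x = paddle.to_tensor([1.0, 2.0])
    y = paddle.to_tensor([3.0, 4.0])
    # Attributes ride along as alternating name/value positional arguments,
    # here 'axis' -> -1 and 'use_mkldnn' -> False.
    out = _C_ops.elementwise_add(x, y, 'axis', -1, 'use_mkldnn', False)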
@@ -236,7 +237,7 @@ def add(x, y, name=None):
     """

     if in_dygraph_mode():
-        return core.ops.elementwise_add(x, y)
+        return _C_ops.elementwise_add(x, y)

     return _elementwise_op(LayerHelper('elementwise_add', **locals()))
...
...
@@ -725,12 +726,12 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
     if in_dygraph_mode():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
-            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
-                                       'reduce_all', reduce_all_flag, 'in_dtype',
-                                       x.dtype, 'out_dtype',
-                                       convert_np_dtype_to_dtype_(dtype))
+            return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
+                                     'reduce_all', reduce_all_flag, 'in_dtype',
+                                     x.dtype, 'out_dtype',
+                                     convert_np_dtype_to_dtype_(dtype))
         else:
-            return core.ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
-                                       'reduce_all', reduce_all_flag)
+            return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
+                                     'reduce_all', reduce_all_flag)

     attrs = {
...
...
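When sum is asked for a specific output dtype, the fast path forwards
'in_dtype'/'out_dtype' attributes so the kernel casts while it reduces,
avoiding a separate cast op. A small example through the public API (values
are my own):

    import paddle

    x = paddle.to_tensor([[0.4, 0.3], [0.2, 0.1]])
    # Hits the dtype_flag branch of the reduce_sum fast path shown above.
    s = paddle.sum(x, dtype='float64')
    print(s.dtype)  # paddle.float64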
@@ -839,7 +840,7 @@ def add_n(inputs, name=None):
     if in_dygraph_mode():
         if isinstance(inputs, Variable):
             inputs = [inputs]
-        return core.ops.sum(inputs, 'use_mkldnn', False)
+        return _C_ops.sum(inputs, 'use_mkldnn', False)

     helper = LayerHelper('add_n', **locals())
     check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
...
...
@@ -893,7 +894,7 @@ def trunc(input, name=None):
             #         [0., 0.]]))
     '''
     if in_dygraph_mode():
-        return core.ops.trunc(input)
+        return _C_ops.trunc(input)
     else:
         inputs = {"X": input}
         attrs = {}
...
...
@@ -948,7 +949,7 @@ def mm(input, mat2, name=None):
     """
     if in_dygraph_mode():
         out = _varbase_creator(dtype=input.dtype)
-        core.ops.matmul(input, mat2, out)
+        _C_ops.matmul(input, mat2, out)
         return out

     def __check_input(x, y):
...
...
@@ -1054,7 +1055,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None):

     if in_dygraph_mode():
-        out = core.ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
+        out = _C_ops.addmm(input, x, y, "Alpha", alpha, "Beta", beta)
         return out

     inputs = {'Input': input, "X": x, "Y": y}
...
...
@@ -1121,7 +1122,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
         axis = [0]

     if in_dygraph_mode():
-        return core.ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
+        return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)

     check_variable_and_dtype(x, 'x',
                              ['float32', 'float64'],
...
...
@@ -1165,7 +1166,7 @@ def inverse(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.inverse(x)
+        return _C_ops.inverse(x)

     def _check_input(x):
         check_variable_and_dtype(x, 'x',
...
...
@@ -1256,7 +1257,7 @@ def max(x, axis=None, keepdim=False, name=None):
     reduce_all = True if axis == None or axis == [] else False
     axis = axis if axis != None and axis != [] else [0]
     if in_dygraph_mode():
-        return core.ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_max(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all)

     helper = LayerHelper('max', **locals())
...
...
@@ -1345,7 +1346,7 @@ def min(x, axis=None, keepdim=False, name=None):
     reduce_all = True if axis == None or axis == [] else False
     axis = axis if axis != None and axis != [] else [0]
     if in_dygraph_mode():
-        return core.ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_min(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all)

     helper = LayerHelper('min', **locals())
...
...
@@ -1391,7 +1392,7 @@ def log1p(x, name=None):
     """

     if in_dygraph_mode():
-        return core.ops.log1p(x)
+        return _C_ops.log1p(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log1p")
     inputs = {'X': [x]}
...
...
@@ -1440,7 +1441,7 @@ def log2(x, name=None):
             print(res) # [1.0]
     """
     if in_dygraph_mode():
-        return core.ops.log2(x)
+        return _C_ops.log2(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log2")
     inputs = {'X': [x]}
...
...
@@ -1490,7 +1491,7 @@ def log10(x, name=None):
             print(res) # [1.0]
     """
     if in_dygraph_mode():
-        return core.ops.log10(x)
+        return _C_ops.log10(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], "log10")
     inputs = {'X': [x]}
...
...
@@ -1557,7 +1558,7 @@ def clip(x, min=None, max=None, name=None):
             max = max.numpy().item(0)
         min = min_ if min is None else min
         max = max_ if max is None else max
-        return core.ops.clip(x, "min", min, "max", max)
+        return _C_ops.clip(x, "min", min, "max", max)

     if min is not None:
         check_type(min, 'min', (float, int, Variable), 'clip')
...
...
@@ -1610,7 +1611,7 @@ def clip_(x, min=None, max=None, name=None):
         max = max.numpy().item(0)
     min = fmin if min is None else min
     max = fmax if max is None else max

-    return core.ops.clip_(x, "min", min, "max", max)
+    return _C_ops.clip_(x, "min", min, "max", max)
...
...
@@ -1656,7 +1657,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None):
             data3 = paddle.trace(case3, offset=-3, axis1=1, axis2=-1) # data2.shape = [3, 5]
     """
     if in_dygraph_mode():
-        return core.ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
+        return _C_ops.trace(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     inputs = {'Input': [x]}
     attrs = {'offset': offset, 'axis1': axis1, 'axis2': axis2}
...
...
@@ -1768,7 +1769,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)
+        return _C_ops.diagonal(x, 'offset', offset, 'axis1', axis1, 'axis2', axis2)

     def __check_input(input, offset, dim1, dim2):
         check_dtype(x.dtype, 'Input',
...
...
@@ -1845,7 +1846,7 @@ ${comment}
             #        [21, 24, 27, 28, 32, 36]])
     """
     if in_dygraph_mode():
-        return core.ops.kron(x, y)
+        return _C_ops.kron(x, y)

     helper = LayerHelper('kron', **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
...
...
@@ -1906,9 +1907,9 @@ def cumsum(x, axis=None, dtype=None, name=None):
     if in_dygraph_mode():
         if axis is None:
-            return core.ops.cumsum(x, 'flatten', flatten)
+            return _C_ops.cumsum(x, 'flatten', flatten)
         else:
-            return core.ops.cumsum(x, 'axis', axis, 'flatten', flatten)
+            return _C_ops.cumsum(x, 'axis', axis, 'flatten', flatten)

     check_type(x, 'x', (Variable), 'cumsum')
     locals_var = locals().copy()
...
...
@@ -1941,7 +1942,7 @@ def isfinite(x, name=None):
             print(out)  # [False  True  True False  True False False]
     """
     if in_dygraph_mode():
-        return core.ops.isfinite_v2(x)
+        return _C_ops.isfinite_v2(x)
     helper = LayerHelper("isfinite_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isfinite')
     out = helper.create_variable_for_type_inference('bool')
...
...
@@ -1969,7 +1970,7 @@ def isinf(x, name=None):
             print(out)  # [ True False False  True False False False]
     """
     if in_dygraph_mode():
-        return core.ops.isinf_v2(x)
+        return _C_ops.isinf_v2(x)
     helper = LayerHelper("isinf_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isinf')
     out = helper.create_variable_for_type_inference(dtype='bool')
...
...
@@ -1997,7 +1998,7 @@ def isnan(x, name=None):
             print(out)  # [False False False False False  True  True]
     """
     if in_dygraph_mode():
-        return core.ops.isnan_v2(x)
+        return _C_ops.isnan_v2(x)
     helper = LayerHelper("isnan_v2", **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'isnan')
     out = helper.create_variable_for_type_inference(dtype='bool')
...
...
@@ -2094,7 +2095,7 @@ def sign(x, name=None):
           print(out)  # [1.0, 0.0, -1.0, 1.0]
     """
     if in_dygraph_mode():
-        return core.ops.sign(x)
+        return _C_ops.sign(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'sign')
     helper = LayerHelper("sign", **locals())
...
...
@@ -2131,7 +2132,7 @@ def tanh(x, name=None):
             # [-0.37994896 -0.19737532  0.09966799  0.29131261]
     """
     if in_dygraph_mode():
-        return core.ops.tanh(x)
+        return _C_ops.tanh(x)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'tanh')
     check_type(x, 'x', (Variable), 'tanh')
...
...
@@ -2146,7 +2147,7 @@ def tanh_(x, name=None):
     Inplace version of ``tanh`` API, the output Tensor will be inplaced with input ``x``.
     Please refer to :ref:`api_tensor_tanh`.
     """
-    return core.ops.tanh_(x)
+    return _C_ops.tanh_(x)

 def increment(x, value=1.0, name=None):
...
...
@@ -2173,7 +2174,7 @@ def increment(x, value=1.0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.increment(x, 'step', value)
+        return _C_ops.increment(x, 'step', value)

     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'], 'increment')
...
...
@@ -2255,7 +2256,7 @@ def all(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         axis = axis if axis != None and axis != [] else [0]
-        return core.ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_all(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all_flag)

     attrs = {
...
...
@@ -2263,7 +2264,6 @@ def all(x, axis=None, keepdim=False, name=None):
         'keep_dim': keepdim,
         'reduce_all': reduce_all_flag
     }
-
     check_variable_and_dtype(x, 'x', ['bool'], 'all')
...
...
@@ -2348,7 +2348,7 @@ def any(x, axis=None, keepdim=False, name=None):
     if in_dygraph_mode():
         axis = axis if axis != None and axis != [] else [0]
-        return core.ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_any(x, 'dim', axis, 'keep_dim', keepdim,
                                    'reduce_all', reduce_all_flag)

     attrs = {
...
...
@@ -2428,7 +2428,7 @@ def conj(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.conj(x)
+        return _C_ops.conj(x)

     check_variable_and_dtype(x, "x", ['complex64', 'complex128', 'float32', 'float64', 'int32', 'int64'], 'conj')
...
...
@@ -2467,7 +2467,7 @@ def digamma(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.digamma(x)
+        return _C_ops.digamma(x)

     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'digamma')
     helper = LayerHelper('digamma', **locals())
...
...
@@ -2543,7 +2543,7 @@ def atan2(y, x, name=None):
     """

     if in_dygraph_mode():
-        return core.ops.atan2(y, x)
+        return _C_ops.atan2(y, x)
     else:
         check_variable_and_dtype(y, 'y', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
         check_variable_and_dtype(x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'atan2')
...
...
python/paddle/tensor/random.py  View file @ f05098b5
...
...
@@ -20,6 +20,7 @@ from ..fluid.layer_helper import LayerHelper
 from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype, check_shape
 from ..fluid.layers import utils
 import paddle
+from paddle import _C_ops

 __all__ = []
...
...
@@ -65,7 +66,7 @@ def bernoulli(x, name=None):
     """

     if in_dygraph_mode():
-        return core.ops.bernoulli(x)
+        return _C_ops.bernoulli(x)

     check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli")
...
...
@@ -130,8 +131,8 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
         "multinomial op is not supported on ROCM yet.")

     if in_dygraph_mode():
-        return core.ops.multinomial(x, 'num_samples', num_samples,
-                                    'replacement', replacement)
+        return _C_ops.multinomial(x, 'num_samples', num_samples, 'replacement',
+                                  replacement)

     check_variable_and_dtype(x, "x", ["float32", "float64"], "multinomial")
...
...
@@ -189,10 +190,9 @@ def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.gaussian_random('shape', shape, 'mean',
-                                        float(mean), 'std',
-                                        float(std), 'seed', seed, 'dtype',
-                                        dtype)
+        return _C_ops.gaussian_random('shape', shape, 'mean',
+                                      float(mean), 'std',
+                                      float(std), 'seed', seed, 'dtype', dtype)

     check_shape(shape, op_type_for_check)
     check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)
...
...
@@ -499,7 +499,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.uniform_random('shape', shape, 'min',
+        return _C_ops.uniform_random('shape', shape, 'min',
                                        float(min), 'max',
                                        float(max), 'seed', seed, 'dtype', dtype)
...
...
@@ -599,8 +599,8 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None):
     if in_dygraph_mode():
         shape = utils.convert_shape_to_list(shape)
-        return core.ops.randint('shape', shape, 'low', low, 'high', high,
-                                'seed', 0, 'dtype', dtype)
+        return _C_ops.randint('shape', shape, 'low', low, 'high', high, 'seed',
+                              0, 'dtype', dtype)

     check_shape(shape, 'randint')
     check_dtype(dtype, 'dtype', ['int32', 'int64'], 'randint')
...
...
@@ -656,7 +656,7 @@ def randperm(n, dtype="int64", name=None):
         dtype = convert_np_dtype_to_dtype_(dtype)

     if in_dygraph_mode():
-        return core.ops.randperm('n', n, 'seed', 0, 'dtype', dtype)
+        return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)

     if n < 1:
         raise ValueError("The input n should be greater than 0 in randperm op.")
...
...
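All of these random-op fast paths are hit transparently whenever the public
API runs under dygraph, with the 'seed' attribute fixed to 0 (i.e. no fixed
seed; the global generator is used). A quick illustrative check (shapes and
bounds are my own):

    import paddle

    # In dygraph mode these calls go straight through the cached ops,
    # e.g. _C_ops.randperm and _C_ops.randint.
    perm = paddle.randperm(5)
    ints = paddle.randint(low=0, high=10, shape=[2, 3])
    print(perm.numpy(), ints.shape)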
python/paddle/tensor/search.py  View file @ f05098b5
...
...
@@ -20,6 +20,7 @@ from paddle.common_ops_import import in_dygraph_mode
 from paddle.common_ops_import import convert_np_dtype_to_dtype_
 from paddle.common_ops_import import Variable
 from paddle.common_ops_import import VarDesc
+from paddle import _C_ops

 # TODO: define searching & indexing functions of a tensor
 # from ..fluid.layers import has_inf  #DEFINE_ALIAS
...
...
@@ -88,7 +89,7 @@ def argsort(x, axis=-1, descending=False, name=None):
             #  [0 2 1 1]]]
     """
     if in_dygraph_mode():
-        _, ids = core.ops.argsort(x, 'axis', axis, 'descending', descending)
+        _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return ids

     check_variable_and_dtype(
         x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
...
...
@@ -165,7 +166,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0

     if in_dygraph_mode():
-        out = core.ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
+        out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                                keepdim, 'flatten', flatten)
         return out
...
...
@@ -242,7 +243,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
         axis = 0

     if in_dygraph_mode():
-        out = core.ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
+        out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                                keepdim, 'flatten', flatten)
         return out
...
...
@@ -302,7 +303,7 @@ def index_select(x, index, axis=0, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.index_select(x, index, 'dim', axis)
+        return _C_ops.index_select(x, index, 'dim', axis)

     helper = LayerHelper("index_select", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
...
...
@@ -378,7 +379,7 @@ def nonzero(x, as_tuple=False):
     rank = len(shape)

     if in_dygraph_mode():
-        outs = core.ops.where_index(x)
+        outs = _C_ops.where_index(x)
     else:
         outs = layers.where(x)
...
...
@@ -452,7 +453,7 @@ def sort(x, axis=-1, descending=False, name=None):
             #  [5. 7. 7. 9.]]]
     """
     if in_dygraph_mode():
-        out, _ = core.ops.argsort(x, 'axis', axis, 'descending', descending)
+        out, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
         return out
     helper = LayerHelper("sort", **locals())
     out = helper.create_variable_for_type_inference(
...
...
@@ -517,7 +518,7 @@ def where(condition, x, y, name=None):
     y_shape = list(y.shape)
     if x_shape == y_shape:
         if in_dygraph_mode():
-            return core.ops.where(condition, x, y)
+            return _C_ops.where(condition, x, y)
         else:
             helper = LayerHelper("where", **locals())
             out = helper.create_variable_for_type_inference(dtype=x.dtype)
...
...
@@ -612,7 +613,7 @@ def index_sample(x, index):
     """
     if in_dygraph_mode():
-        return core.ops.index_sample(x, index)
+        return _C_ops.index_sample(x, index)

     helper = LayerHelper("index_sample", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
...
...
@@ -660,7 +661,7 @@ def masked_select(x, mask, name=None):
     """

     if in_dygraph_mode():
-        return core.ops.masked_select(x, mask)
+        return _C_ops.masked_select(x, mask)

     helper = LayerHelper("masked_select", **locals())
     check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
...
...
@@ -732,11 +733,11 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None):
     if in_dygraph_mode():
         k = k.numpy().item(0) if isinstance(k, Variable) else k
         if axis is None:
-            out, indices = core.ops.top_k_v2(x, 'k',
-                                             int(k), 'largest', largest,
-                                             'sorted', sorted)
+            out, indices = _C_ops.top_k_v2(x, 'k',
+                                           int(k), 'largest', largest, 'sorted',
+                                           sorted)
         else:
-            out, indices = core.ops.top_k_v2(x, 'k',
-                                             int(k), 'axis', axis, 'largest',
-                                             largest, 'sorted', sorted)
+            out, indices = _C_ops.top_k_v2(x, 'k',
+                                           int(k), 'axis', axis, 'largest',
+                                           largest, 'sorted', sorted)
         return out, indices
...
...
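Note how these kernels return tuples: sort keeps the values from argsort and
discards the indices, argsort does the reverse, and topk returns both outputs
of top_k_v2. A small example through the public API:

    import paddle

    x = paddle.to_tensor([3.0, 1.0, 2.0])
    values, indices = paddle.topk(x, k=2)  # both outputs are kept
    order = paddle.argsort(x)              # indices only; values discarded
    print(values.numpy(), indices.numpy(), order.numpy())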
python/paddle/tensor/stat.py  View file @ f05098b5
...
...
@@ -22,6 +22,7 @@ from ..fluid import layers
 from .search import where
 from ..fluid.data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
 import paddle
+from paddle import _C_ops

 __all__ = []
...
...
@@ -88,7 +89,7 @@ def mean(x, axis=None, keepdim=False, name=None):
         axis = [0]

     if in_dygraph_mode():
-        return core.ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
+        return _C_ops.reduce_mean(x, 'dim', axis, 'keep_dim', keepdim,
                                     'reduce_all', reduce_all)

     check_variable_and_dtype(x, 'x/input', ['float32', 'float64'],
...
...
@@ -236,7 +237,7 @@ def numel(x, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.size(x)
+        return _C_ops.size(x)

     if not isinstance(x, Variable):
         raise TypeError("x must be a Tensor in numel")
...
...
python/paddle/vision/ops.py  View file @ f05098b5
...
...
@@ -21,6 +21,7 @@ from ..nn import Layer
 from ..fluid.initializer import Normal
 from paddle.common_ops_import import *
+from paddle import _C_ops

 __all__ = [ #noqa
     'yolo_loss',
...
...
@@ -189,7 +190,7 @@ def yolo_loss(x,
     """

     if in_dygraph_mode() and gt_score is None:
-        loss = core.ops.yolov3_loss(
+        loss = _C_ops.yolov3_loss(
             x, gt_box, gt_label, 'anchors', anchors, 'anchor_mask', anchor_mask,
             'class_num', class_num, 'ignore_thresh', ignore_thresh,
             'downsample_ratio', downsample_ratio, 'use_label_smooth',
...
...
@@ -372,7 +373,7 @@ def yolo_box(x,
                                  scale_x_y=1.)
     """
     if in_dygraph_mode():
-        boxes, scores = core.ops.yolo_box(
+        boxes, scores = _C_ops.yolo_box(
             x, img_size, 'anchors', anchors, 'class_num', class_num,
             'conf_thresh', conf_thresh, 'downsample_ratio', downsample_ratio,
             'clip_bbox', clip_bbox, 'scale_x_y', scale_x_y, 'iou_aware',
...
...
@@ -551,11 +552,10 @@ def deform_conv2d(x,
                  'im2col_step', 1)
         if use_deform_conv2d_v1:
             op_type = 'deformable_conv_v1'
-            pre_bias = getattr(core.ops, op_type)(x, offset, weight, *attrs)
+            pre_bias = getattr(_C_ops, op_type)(x, offset, weight, *attrs)
         else:
             op_type = 'deformable_conv'
-            pre_bias = getattr(core.ops, op_type)(x, offset, mask, weight,
-                                                  *attrs)
+            pre_bias = getattr(_C_ops, op_type)(x, offset, mask, weight,
+                                                *attrs)
         if bias is not None:
             out = nn.elementwise_add(pre_bias, bias, axis=1)
         else:
...
...
@@ -839,7 +839,7 @@ def read_file(filename, name=None):
     """
     if in_dygraph_mode():
-        return core.ops.read_file('filename', filename)
+        return _C_ops.read_file('filename', filename)

     inputs = dict()
     attrs = {'filename': filename}
...
...
@@ -886,7 +886,7 @@ def decode_jpeg(x, mode='unchanged', name=None):
     """
     if in_dygraph_mode():
-        return core.ops.decode_jpeg(x, "mode", mode)
+        return _C_ops.decode_jpeg(x, "mode", mode)

     inputs = {'X': x}
     attrs = {"mode": mode}
...
...