PaddlePaddle / Paddle
Commit 0e816260 (unverified)
Authored by ShenLiang on Aug 24, 2020; committed via GitHub on Aug 24, 2020
add div, floor_div, remainder (#26562)
* add div, floor_div, remainder
Parent: 973fdd67
Showing 13 changed files with 601 additions and 83 deletions (+601 -83).
paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc   +2   -0
paddle/fluid/operators/elementwise/elementwise_floordiv_op.cu   +2   -0
paddle/fluid/operators/elementwise/elementwise_floordiv_op.h    +20  -3
python/paddle/fluid/dygraph/math_op_patch.py                    +29  -10
python/paddle/fluid/layers/math_op_patch.py                     +29  -12
python/paddle/fluid/tests/unittests/test_dist_transpiler_async_decay.py   +2    -2
python/paddle/fluid/tests/unittests/test_elementwise_div_op.py            +115  -16
python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py       +123  -17
python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py            +117  -17
python/paddle/fluid/tests/unittests/test_math_op_patch.py                 +4    -4
python/paddle/fluid/tests/unittests/test_rnn_decode_api.py                +2    -1
python/paddle/nn/functional/loss.py                                       +2    -1
python/paddle/tensor/math.py                                              +154  -0
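For orientation before the per-file diffs: the user-visible effect of this commit is that the /, // and % operators on tensors now route through the new paddle.divide, paddle.floor_divide and paddle.remainder APIs, which own the type-promotion rules. A minimal usage sketch assembled from the unit tests below (illustrative only, not part of the diff):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = paddle.to_tensor(np.array([2, 3, 4]).astype('float64'))
    y = paddle.to_tensor(np.array([1, 5, 2]).astype('float64'))
    print(paddle.divide(x, y).numpy())        # [2.  0.6 2. ]
    print(paddle.floor_divide(x, y).numpy())  # [2. 0. 2.]
    print(paddle.remainder(x, y).numpy())     # [0. 3. 0.]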
paddle/fluid/operators/elementwise/elementwise_floordiv_op.cc
@@ -49,6 +49,8 @@ REGISTER_OP_WITHOUT_GRADIENT(elementwise_floordiv, ops::ElementwiseOp,
REGISTER_OP_CPU_KERNEL(
    elementwise_floordiv,
    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, float>,
    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, double>,
    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, int>,
    ops::ElementwiseFloorDivKernel<paddle::platform::CPUDeviceContext, int64_t>);
paddle/fluid/operators/elementwise/elementwise_floordiv_op.cu
@@ -19,5 +19,7 @@ namespace plat = paddle::platform;
REGISTER_OP_CUDA_KERNEL(
    elementwise_floordiv,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, float>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, double>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, int>,
    ops::ElementwiseFloorDivKernel<plat::CUDADeviceContext, int64_t>);
paddle/fluid/operators/elementwise/elementwise_floordiv_op.h
@@ -14,6 +14,7 @@ limitations under the License. */
#pragma once
#include <math.h>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
@@ -24,7 +25,16 @@ namespace operators {
template <typename T>
struct FloorDivFunctor {
-  inline HOSTDEVICE T operator()(T a, T b) const { return a / b; }
+  inline HOSTDEVICE T operator()(T a, T b) const {
+    return static_cast<T>(floor(a / b));
+  }
};

template <typename T>
struct InverseFloorDivFunctor {
  inline HOSTDEVICE T operator()(T a, T b) const {
    return static_cast<T>(floor(b / a));
  }
};

template <typename DeviceContext, typename T>
@@ -32,8 +42,15 @@ void elementwise_floor_div(const framework::ExecutionContext &ctx,
                           const framework::Tensor *x,
                           const framework::Tensor *y, framework::Tensor *z) {
  int axis = ctx.Attr<int>("axis");
-  ElementwiseComputeEx<FloorDivFunctor<T>, DeviceContext, T>(
-      ctx, x, y, axis, FloorDivFunctor<T>(), z);
+  auto x_dims = x->dims();
+  auto y_dims = y->dims();
+  if (x_dims.size() >= y_dims.size()) {
+    ElementwiseComputeEx<FloorDivFunctor<T>, DeviceContext, T>(
+        ctx, x, y, axis, FloorDivFunctor<T>(), z);
+  } else {
+    ElementwiseComputeEx<InverseFloorDivFunctor<T>, DeviceContext, T>(
+        ctx, x, y, axis, InverseFloorDivFunctor<T>(), z);
+  }
}

template <typename DeviceContext, typename T>
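Two behavioural notes on this hunk: FloorDivFunctor previously returned a / b, which truncates integer operands but does not floor a float quotient; it now computes static_cast<T>(floor(a / b)). InverseFloorDivFunctor covers the broadcast case where y has higher rank than x (the new else branch above). A quick NumPy cross-check of the intended elementwise semantics (illustrative only, not part of the diff):

import numpy as np

a = np.array([5.0, 7.5, -7.5])
b = np.array([2.0, 2.0, 2.0])
# floor(a / b) is what FloorDivFunctor now computes per element; for floats
# this matches NumPy's floor division:
assert np.allclose(np.floor(a / b), np.floor_divide(a, b))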
python/paddle/fluid/dygraph/math_op_patch.py
@@ -19,6 +19,7 @@ from ..framework import Variable, convert_np_dtype_to_dtype_, _varbase_creator
from ..layers.layer_function_generator import OpProtoHolder
from ..layers import common_methods
from . import to_variable, no_grad
import paddle
import numpy as np
import six
@@ -162,6 +163,26 @@ def monkey_patch_math_varbase():
    def _scalar_div_(var, value):
        return _scalar_elementwise_op_(var, 1.0 / value, 0.0)

    # TODO(shenliang03): currently, it supports divide, floor_divide, remainder
    # for binary operator by using the api to achieve the type promotion
    def _binary_method_creator_(op_type, reverse=False):
        import paddle

        def __impl__(self, other_var):
            import paddle
            op = getattr(paddle, op_type)
            if reverse:
                return op(other_var, self)
            else:
                return op(self, other_var)

        __impl__.__doc__ = """
        See paddle.{}""".format(op_type)
        __impl__.__name__ = op_type
        return __impl__

    # for binary operator such as elementwise, compare
    def _binary_creator_(method_name, op_type,
@@ -260,22 +281,20 @@ def monkey_patch_math_varbase():
        ##  a*b == b*a. Do not need to reverse explicitly
        ('__rmul__',
         _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
-        ('__div__', _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_)),
-        ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div', False, _scalar_div_)),
-        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True, None)),
-        ('__rtruediv__', _binary_creator_('rtruediv__', 'elementwise_div', True, None)),
        ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False, None)),
        ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True, None)),
-        ('__floordiv__', _binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)),
-        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False, None)),
+        # These binary use paddle.optype
+        ('__div__', _binary_method_creator_('divide', False)),
+        ('__truediv__', _binary_method_creator_('divide', False)),
+        ('__rtruediv__', _binary_method_creator_('divide', True)),
+        ('__rdiv__', _binary_method_creator_('divide', True)),
+        ('__floordiv__', _binary_method_creator_('floor_divide', False)),
+        ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
+        ('__mod__', _binary_method_creator_('remainder', False)),
        ## for logical compare
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
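With these entries, the patched magic methods no longer call the elementwise_* ops directly; each one forwards to the top-level paddle API created by _binary_method_creator_, so type promotion happens in one place. A small dygraph sketch of the resulting dispatch (illustrative only, not part of the diff):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    x = paddle.to_tensor(np.array([5, 7, 9]).astype('int64'))
    y = paddle.to_tensor(np.array([2, 2, 2]).astype('int64'))
    print((x // y).numpy())  # dispatches to paddle.floor_divide -> [2 3 4]
    print((x % y).numpy())   # dispatches to paddle.remainder    -> [1 1 1]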
python/paddle/fluid/layers/math_op_patch.py
@@ -16,6 +16,7 @@ from __future__ import print_function
import warnings
import inspect

import paddle
from .. import core
from ..framework import Variable, unique_name
@@ -45,6 +46,7 @@ EXPRESSION_MAP = {
"__pow__"
:
"A ** B"
,
"__rpow__"
:
"A **= B"
,
"__floordiv__"
:
"A //B"
,
"__rfloordiv__"
:
"A //= B"
,
"__mod__"
:
"A % B"
,
"__eq__"
:
"A == B"
,
"__ne__"
:
"A != B"
,
@@ -233,6 +235,25 @@ def monkey_patch_variable():
    def _scalar_div_(var, value):
        return _scalar_op_(var, 1.0 / value, 0.0)

    # TODO(shenliang03): currently, it supports divide, floor_divide, remainder
    # for binary operator by using the api to achieve the type promotion
    def _binary_method_creator_(op_type, reverse=False):
        import paddle

        def __impl__(self, other_var):
            op = getattr(paddle, op_type)
            if reverse:
                return op(other_var, self)
            else:
                return op(self, other_var)

        __impl__.__doc__ = """
        See paddle.{}""".format(op_type)
        __impl__.__name__ = op_type
        return __impl__

    def _binary_creator_(method_name, op_type, reverse=False,
@@ -339,22 +360,18 @@ def monkey_patch_variable():
        # a*b == b*a. Do not need to reverse explicitly
        ('__rmul__',
         _binary_creator_('__rmul__', 'elementwise_mul', False, _scalar_mul_)),
-        ('__div__', _binary_creator_('__div__', 'elementwise_div', False, _scalar_div_)),
-        ('__truediv__', _binary_creator_('__truediv__', 'elementwise_div', False, _scalar_div_)),
-        ('__rdiv__', _binary_creator_('__rdiv__', 'elementwise_div', True, None)),
-        ('__rtruediv__', _binary_creator_('__rtruediv__', 'elementwise_div', True, None)),
        ('__pow__', _binary_creator_('__pow__', 'elementwise_pow', False, None)),
        ('__rpow__', _binary_creator_('__rpow__', 'elementwise_pow', True, None)),
-        ('__floordiv__', _binary_creator_('__floordiv__', 'elementwise_floordiv', False, None)),
-        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False, None)),
+        # These binary use paddle.optype
+        ('__div__', _binary_method_creator_('divide', False)),
+        ('__rdiv__', _binary_method_creator_('divide', True)),
+        ('__truediv__', _binary_method_creator_('divide', False)),
+        ('__rtruediv__', _binary_method_creator_('divide', True)),
+        ('__floordiv__', _binary_method_creator_('floor_divide', False)),
+        ('__rfloordiv__', _binary_method_creator_('floor_divide', True)),
+        ('__mod__', _binary_method_creator_('remainder', False)),
        # for logical compare
        ('__eq__', _binary_creator_('__eq__', 'equal', False, None)),
        ('__ne__', _binary_creator_('__ne__', 'not_equal', False, None)),
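The static-graph patch mirrors the dygraph one: under a program guard, operators on a Variable now build the same paddle.* calls. A sketch (illustrative only, not part of the diff):

import paddle
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program()):
    x = fluid.data(name="x", shape=[3], dtype="int64")
    b = x // 2  # graph node built via paddle.floor_divide
    c = x % 2   # graph node built via paddle.remainder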
python/paddle/fluid/tests/unittests/test_dist_transpiler_async_decay.py
@@ -113,8 +113,8 @@ class TranspilerAsyncLRDecayTest(unittest.TestCase):
                         ["listen_and_serv"])
        # block1: sum,cast,scale,floor,fill_constant,elementwise_pow,scale
        self.assertEqual([op.type for op in pserver.blocks[1].ops], [
-            "sum", "cast", "scale", "floor", "fill_constant",
-            "elementwise_pow", "scale"
+            "sum", "cast", "fill_constant", "elementwise_div", "floor",
+            "fill_constant", "elementwise_pow", "scale"
        ])
        # block1~2: optimize pass
python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -240,25 +240,124 @@ class TestElementwiseDivBroadcast(unittest.TestCase):
        self.assertEqual((out_result == (2 / x)).all(), True)


class TestDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="float32")
            y = fluid.data(name='y', shape=[2, 3], dtype='float32')


class TestDivideAPI(unittest.TestCase):
    def setUp(self):
        paddle.set_default_dtype("float64")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        # rule 1
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = np.array([1, 2, 3])
            self.assertRaises(TypeError, paddle.divide, x=x, y=y)

        # rule 2: both the inputs are not Tensor
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = 2
            y = 4
            res = paddle.divide(x, y)
            exe = fluid.Executor(place)
            np_z = exe.run(fluid.default_main_program(),
                           feed={},
                           fetch_list=[res])
            self.assertEqual(np_z[0] == 0.5, True)

        # rule 3:
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = fluid.data(name="y", shape=[3], dtype="float32")
            self.assertRaises(TypeError, paddle.divide, x=x, y=y)

        # rule 4: x is Tensor, y is scalar
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = 2
            exe = fluid.Executor(place)
            res = x / y
            np_z = exe.run(fluid.default_main_program(),
                           feed={"x": np.array([2, 3, 4]).astype('float64')},
                           fetch_list=[res])
            z_expected = np.array([1., 1.5, 2.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

        # rule 5: y is Tensor, x is scalar
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = 2
            exe = fluid.Executor(place)
            res = y / x
            np_z = exe.run(fluid.default_main_program(),
                           feed={"x": np.array([2, 8, 4]).astype('float64')},
                           fetch_list=[res])
            z_expected = np.array([1., 0.25, 0.5])
            self.assertEqual((np_z[0] == z_expected).all(), True)

        # rule 6: y is Tensor, x is Tensor
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = fluid.data(name="y", shape=[3], dtype="float64")
            exe = fluid.Executor(place)
            res = x / y
            np_z = exe.run(fluid.default_main_program(),
                           feed={
                               "x": np.array([2, 3, 4]).astype('float64'),
                               "y": np.array([1, 5, 2]).astype('float64')
                           },
                           fetch_list=[res])
            z_expected = np.array([2., 0.6, 2.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

            y_1 = paddle.divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 4]).astype('float64')
            np_y = np.array([1, 5, 2]).astype('float64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2., 0.6, 2.])
            self.assertEqual((np_z == z_expected).all(), True)

        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.divide, x=x, y=np_y)

                # rule 2: both the inputs are not Tensor
                z = paddle.divide(3, 2)
                self.assertEqual(z.numpy()[0] == 1.5, True)

                # rule 3: both the inputs are Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x, dtype="float32")
                y = paddle.to_tensor(np_y, dtype="float64")
                self.assertRaises(TypeError, paddle.divide, x=x, y=y)

                # rule 4: x is Tensor, y is scalar
                np_x = np.array([2, 3, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = x / y
                z_expected = np.array([1., 1.5, 2.])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 5: y is Tensor, x is scalar
                np_x = np.array([2, 1, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = y / x
                z_expected = np.array([1., 2., 0.5])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 6: y is Tensor, x is Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                y = paddle.to_tensor(np_y)
                z = x / y
                z_expected = np.array([2., 0.6, 2.])
                self.assertEqual((z_expected == z.numpy()).all(), True)


if __name__ == '__main__':
python/paddle/fluid/tests/unittests/test_elementwise_floordiv_op.py
@@ -58,6 +58,13 @@ class TestElementwiseModOp(OpTest):
        pass


class TestElementwiseModOpInverse(TestElementwiseModOp):
    def init_input_output(self):
        self.x = np.random.uniform(0, 10000, [10]).astype(self.dtype)
        self.y = np.random.uniform(0, 1000, [10, 10]).astype(self.dtype)
        self.out = np.floor_divide(self.x, self.y)


class TestElementwiseModOp_scalar(TestElementwiseModOp):
    def init_input_output(self):
        scale_x = random.randint(0, 100000000)
@@ -67,25 +74,124 @@ class TestElementwiseModOp_scalar(TestElementwiseModOp):
        self.out = np.floor_divide(self.x, self.y)


class TestFloorDivideOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
            y = fluid.data(name='y', shape=[2, 3], dtype='int64')

            y_1 = paddle.floor_divide(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)


class TestFloorDivideAPI(unittest.TestCase):
    def setUp(self):
        paddle.set_default_dtype("float64")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        # rule 1
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = np.array([1, 2, 3])
            self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)

        # rule 2: both the inputs are not Tensor
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = 2
            y = 4
            res = paddle.floor_divide(x, y)
            exe = fluid.Executor(place)
            np_z = exe.run(fluid.default_main_program(),
                           feed={},
                           fetch_list=[res])
            self.assertEqual(np_z[0] == 0., True)

        # rule 3:
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = fluid.data(name="y", shape=[3], dtype="float32")
            self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)

        # rule 4: x is Tensor, y is scalar
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = 2
            exe = fluid.Executor(place)
            res = x // y
            np_z = exe.run(fluid.default_main_program(),
                           feed={"x": np.array([2, 3, 4]).astype('float64')},
                           fetch_list=[res])
            z_expected = np.array([1., 1., 2.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

        # rule 5: y is Tensor, x is scalar
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = 2
            exe = fluid.Executor(place)
            res = y // x
            np_z = exe.run(fluid.default_main_program(),
                           feed={"x": np.array([2, 8, 4]).astype('float64')},
                           fetch_list=[res])
            z_expected = np.array([1., 0., 0.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

        # rule 6: y is Tensor, x is Tensor
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = fluid.data(name="y", shape=[3], dtype="float64")
            exe = fluid.Executor(place)
            res = x // y
            np_z = exe.run(fluid.default_main_program(),
                           feed={
                               "x": np.array([2, 3, 4]).astype('float64'),
                               "y": np.array([1, 5, 2]).astype('float64')
                           },
                           fetch_list=[res])
            z_expected = np.array([2., 0., 2.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 8, 7]).astype('int64')
            np_y = np.array([1, 5, 3, 3]).astype('int64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.floor_divide(x, y)
            np_z = z.numpy()
            z_expected = np.array([2, 0, 2, 2])
            self.assertEqual((np_z == z_expected).all(), True)

        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.floor_divide, x=x, y=np_y)

                # rule 2: both the inputs are not Tensor
                z = paddle.floor_divide(3, 2)
                self.assertEqual(z.numpy()[0] == 1., True)

                # rule 3: both the inputs are Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x, dtype="float32")
                y = paddle.to_tensor(np_y, dtype="float64")
                self.assertRaises(TypeError, paddle.floor_divide, x=x, y=y)

                # rule 4: x is Tensor, y is scalar
                np_x = np.array([2, 3, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = x // y
                z_expected = np.array([1, 1, 2])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 5: y is Tensor, x is scalar
                np_x = np.array([2, 1, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = y // x
                z_expected = np.array([1, 2, 0])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 6: y is Tensor, x is Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                y = paddle.to_tensor(np_y)
                z = x // y
                z_expected = np.array([2., 0., 2.])
                self.assertEqual((z_expected == z.numpy()).all(), True)


if __name__ == '__main__':
python/paddle/fluid/tests/unittests/test_elementwise_mod_op.py
@@ -84,25 +84,125 @@ class TestElementwiseModOpDouble(TestElementwiseModOpFloat):
        self.dtype = np.float64


class TestRemainderOp(unittest.TestCase):
    def test_name(self):
        with fluid.program_guard(fluid.Program()):
            x = fluid.data(name="x", shape=[2, 3], dtype="int64")
            y = fluid.data(name='y', shape=[2, 3], dtype='int64')

            y_1 = paddle.remainder(x, y, name='div_res')
            self.assertEqual(('div_res' in y_1.name), True)


class TestRemainderAPI(unittest.TestCase):
    def setUp(self):
        paddle.set_default_dtype("float64")
        self.places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda():
            self.places.append(fluid.CUDAPlace(0))

    def check_static_result(self, place):
        # rule 1
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = np.array([1, 2, 3])
            self.assertRaises(TypeError, paddle.remainder, x=x, y=y)

        # rule 3:
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = fluid.data(name="y", shape=[3], dtype="float32")
            self.assertRaises(TypeError, paddle.remainder, x=x, y=y)

        # rule 4: x is Tensor, y is scalar
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = 2
            exe = fluid.Executor(place)
            res = x % y
            np_z = exe.run(fluid.default_main_program(),
                           feed={"x": np.array([2, 3, 4]).astype('float64')},
                           fetch_list=[res])
            z_expected = np.array([0., 1., 0.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

        # rule 5: y is Tensor, x is scalar
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = 3
            y = fluid.data(name="y", shape=[3], dtype="float32")
            self.assertRaises(TypeError, paddle.remainder, x=x, y=y)

        # rule 6: y is Tensor, x is Tensor
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[3], dtype="float64")
            y = fluid.data(name="y", shape=[1], dtype="float64")
            exe = fluid.Executor(place)
            res = x % y
            np_z = exe.run(fluid.default_main_program(),
                           feed={
                               "x": np.array([1., 2., 4]).astype('float64'),
                               "y": np.array([1.5]).astype('float64')
                           },
                           fetch_list=[res])
            z_expected = np.array([1., 0.5, 1.0])
            self.assertEqual((np_z[0] == z_expected).all(), True)

        # rule 6: y is Tensor, x is Tensor
        with fluid.program_guard(fluid.Program(), fluid.Program()):
            x = fluid.data(name="x", shape=[6], dtype="float64")
            y = fluid.data(name="y", shape=[1], dtype="float64")
            exe = fluid.Executor(place)
            res = x % y
            np_z = exe.run(fluid.default_main_program(),
                           feed={
                               "x": np.array([-3., -2, -1, 1, 2, 3]).astype('float64'),
                               "y": np.array([2]).astype('float64')
                           },
                           fetch_list=[res])
            z_expected = np.array([1., 0., 1., 1., 0., 1.])
            self.assertEqual((np_z[0] == z_expected).all(), True)

    def test_static(self):
        for place in self.places:
            self.check_static_result(place=place)

    def test_dygraph(self):
        with fluid.dygraph.guard():
            np_x = np.array([2, 3, 8, 7]).astype('int64')
            np_y = np.array([1, 5, 3, 3]).astype('int64')
            x = paddle.to_tensor(np_x)
            y = paddle.to_tensor(np_y)
            z = paddle.remainder(x, y)
            np_z = z.numpy()
            z_expected = np.array([0, 3, 2, 1])
            self.assertEqual((np_z == z_expected).all(), True)

        for place in self.places:
            with fluid.dygraph.guard(place):
                # rule 1 : avoid numpy.ndarray
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.remainder, x=x, y=np_y)

                # rule 3: both the inputs are Tensor
                np_x = np.array([2, 3, 4])
                np_y = np.array([1, 5, 2])
                x = paddle.to_tensor(np_x, dtype="float32")
                y = paddle.to_tensor(np_y, dtype="float64")
                self.assertRaises(TypeError, paddle.remainder, x=x, y=y)

                # rule 4: x is Tensor, y is scalar
                np_x = np.array([2, 3, 4])
                x = paddle.to_tensor(np_x, dtype="int32")
                y = 2
                z = x % y
                z_expected = np.array([0, 1, 0])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 5: y is Tensor, x is scalar
                np_x = np.array([2, 3, 4])
                x = paddle.to_tensor(np_x)
                self.assertRaises(TypeError, paddle.remainder, x=3, y=x)

                # rule 6: y is Tensor, x is Tensor
                np_x = np.array([1., 2., 4])
                np_y = np.array([1.5])
                x = paddle.to_tensor(np_x)
                y = paddle.to_tensor(np_y)
                z = x % y
                z_expected = np.array([1., 0.5, 1.0])
                self.assertEqual((z_expected == z.numpy()).all(), True)

                # rule 6: y is Tensor, x is Tensor
                np_x = np.array([-3., -2, -1, 1, 2, 3])
                np_y = np.array([2.])
                x = paddle.to_tensor(np_x)
                y = paddle.to_tensor(np_y)
                z = x % y
                z_expected = np.array([1., 0., 1., 1., 0., 1.])
                self.assertEqual((z_expected == z.numpy()).all(), True)


if __name__ == '__main__':
python/paddle/fluid/tests/unittests/test_math_op_patch.py
@@ -189,15 +189,15 @@ class TestMathOpPatches(unittest.TestCase):
    @prog_scope()
    def test_integer_div(self):
        a = fluid.layers.data(name="a", shape=[1], dtype='int64')
-        b = a / 7
+        b = a / 2
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
-        a_np = numpy.array([3, 4, 10, 14, 9, 18]).astype('int64')
+        a_np = numpy.array([3, 4, 10, 14, 9, 18])
        b_np, = exe.run(fluid.default_main_program(),
                        feed={"a": a_np},
                        fetch_list=[b])
-        b_np_actual = (a_np / 7).astype('int64')
+        # for paddle2.0, use true_divide
+        b_np_actual = (a_np / 2.0)
        self.assertTrue(numpy.array_equal(b_np, b_np_actual))

    @prog_scope()
python/paddle/fluid/tests/unittests/test_rnn_decode_api.py
@@ -248,7 +248,8 @@ class PolicyGradient(object):
            func=reward_func, x=[action, length], out=reward)
        neg_log_prob = layers.cross_entropy(act_prob, action)
        cost = neg_log_prob * reward
-        cost = (layers.reduce_sum(cost) / layers.reduce_sum(length)
+        cost = (layers.reduce_sum(cost) /
+                layers.cast(layers.reduce_sum(length), "float32")
                ) if length is not None else layers.reduce_mean(cost)
        optimizer = fluid.optimizer.Adam(self.lr)
        optimizer.minimize(cost)
python/paddle/nn/functional/loss.py
@@ -1034,7 +1034,8 @@ def ctc_loss(log_probs,
    loss_out = fluid.layers.squeeze(loss_out, [-1])
    assert reduction in ['mean', 'sum', 'none']
    if reduction == 'mean':
-        loss_out = paddle.mean(loss_out / label_lengths)
+        loss_out = paddle.mean(loss_out /
+                               paddle.cast(label_lengths, loss_out.dtype))
    elif reduction == 'sum':
        loss_out = paddle.sum(loss_out)
    return loss_out
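The added cast is needed because the new divide path raises TypeError on mismatched dtypes, and label_lengths is an integer tensor while the loss is floating point. A sketch of the pattern (tensor values hypothetical, not part of the diff):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    loss = paddle.to_tensor(np.array([6.0, 9.0]).astype('float32'))
    lengths = paddle.to_tensor(np.array([3, 3]).astype('int64'))
    # loss / lengths would now raise TypeError (rule 3); cast first:
    mean_loss = paddle.mean(loss / paddle.cast(lengths, loss.dtype))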
python/paddle/tensor/math.py
@@ -62,6 +62,7 @@ from ..fluid.layers import increment #DEFINE_ALIAS
from ..fluid.layers import multiplex    #DEFINE_ALIAS
from ..fluid.layers import sums    #DEFINE_ALIAS
from ..fluid import layers
import paddle

__all__ = [
    'abs',
@@ -133,6 +134,19 @@ __all__ = [
]
# yapf: enable.

_supported_int_dtype_ = [
    VarDesc.VarType.UINT8,
    VarDesc.VarType.INT8,
    VarDesc.VarType.INT16,
    VarDesc.VarType.INT32,
    VarDesc.VarType.INT64,
]

_supported_float_dtype_ = [
    VarDesc.VarType.FP32,
    VarDesc.VarType.FP64,
]
@templatedoc()
def pow(input, exponent, name=None):
    """
@@ -308,9 +322,69 @@ def divide(x, y, name=None):
    axis = -1
    act = None
    if in_dygraph_mode():
        # rule 1 : avoid numpy.ndarray
        if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
            raise TypeError(
                "divide(): arguments must be Tensor or scalar, not numpy.ndarray."
            )
        # rule 2: both the inputs are not Tensor
        elif not isinstance(x, paddle.Tensor) and not isinstance(
                y, paddle.Tensor):
            x = paddle.full(
                shape=[1], dtype=paddle.get_default_dtype(), fill_value=x)
            y = paddle.full(
                shape=[1], dtype=paddle.get_default_dtype(), fill_value=y)
        # rule 3: both the inputs are Tensor
        elif isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
            if y.dtype != x.dtype:
                raise TypeError(
                    "divide(): argument position 1 and argument position 2 must have the same dtype."
                    "But x is {}, y is {}".format(x.dtype, y.dtype))
            elif x.dtype in _supported_int_dtype_:
                x = x.astype(paddle.get_default_dtype())
                y = y.astype(paddle.get_default_dtype())
        # rule 4: x is Tensor, y is scalar
        elif isinstance(x, paddle.Tensor) and not isinstance(y, paddle.Tensor):
            if x.dtype in _supported_int_dtype_:
                x = x.astype(paddle.get_default_dtype())
            y = paddle.full(shape=[1], dtype=x.dtype, fill_value=y)
        # rule 5: x is scalar, y is Tensor
        elif not isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
            if y.dtype in _supported_int_dtype_:
                y = y.astype(paddle.get_default_dtype())
            x = paddle.full(shape=[1], dtype=y.dtype, fill_value=x)

        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name=op_type)

    # rule 1 : avoid numpy.ndarray
    if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
        raise TypeError(
            "divide(): arguments must be Tensor or scalar, not numpy.ndarray.")
    # rule 2: both the inputs are not Tensor
    elif not isinstance(x, Variable) and not isinstance(y, Variable):
        x = paddle.fill_constant(
            shape=[1], dtype=paddle.get_default_dtype(), value=x)
        y = paddle.fill_constant(
            shape=[1], dtype=paddle.get_default_dtype(), value=y)
    # rule 3: both the inputs are Tensor
    elif isinstance(x, Variable) and isinstance(y, Variable):
        if y.dtype != x.dtype:
            raise TypeError(
                "divide(): argument position 1 and argument position 2 must have the same dtype."
                "But x is {}, y is {}".format(x.dtype, y.dtype))
        elif x.dtype in _supported_int_dtype_:
            x = paddle.cast(x, paddle.get_default_dtype())
            y = paddle.cast(y, paddle.get_default_dtype())
    # rule 4: x is Tensor, y is scalar
    elif isinstance(x, Variable) and not isinstance(y, Variable):
        if x.dtype in _supported_int_dtype_:
            x = paddle.cast(x, paddle.get_default_dtype())
        y = paddle.fill_constant(shape=[1], dtype=x.dtype, value=y)
    # rule 5: x is scalar, y is Tensor
    elif not isinstance(x, Variable) and isinstance(y, Variable):
        if y.dtype in _supported_int_dtype_:
            y = paddle.cast(y, paddle.get_default_dtype())
        x = paddle.fill_constant(shape=[1], dtype=y.dtype, value=x)

    return _elementwise_op(LayerHelper(op_type, **locals()))
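Taken together, the rules implement true division with promotion of integer tensors to the default float dtype. A dygraph sketch of rules 2 and 4, mirroring the tests above (illustrative only, not part of the diff):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    # rule 2: two Python scalars are lifted to 1-element tensors
    print(paddle.divide(3, 2).numpy())  # [1.5]
    # rule 4: an int32 tensor divided by a scalar is promoted first
    x = paddle.to_tensor(np.array([2, 3, 4]).astype('int32'))
    print((x / 2).numpy())              # [1.  1.5 2. ]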
@@ -352,9 +426,55 @@ def floor_divide(x, y, name=None):
    op_type = 'elementwise_floordiv'
    axis = -1
    if in_dygraph_mode():
        # rule 1 : avoid numpy.ndarray
        if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
            raise TypeError(
                "floor_divide(): arguments must be Tensor or scalar, not numpy.ndarray."
            )
        # rule 2: both the inputs are not Tensor
        elif not isinstance(x, paddle.Tensor) and not isinstance(
                y, paddle.Tensor):
            x = paddle.full(
                shape=[1], dtype=paddle.get_default_dtype(), fill_value=x)
            y = paddle.full(
                shape=[1], dtype=paddle.get_default_dtype(), fill_value=y)
        # rule 3: both the inputs are Tensor
        elif isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
            if y.dtype != x.dtype:
                raise TypeError(
                    "floor_divide(): argument position 1 and argument position 2 must have the same dtype."
                    "But x is {}, y is {}".format(x.dtype, y.dtype))
        # rule 4: x is Tensor, y is scalar
        elif isinstance(x, paddle.Tensor) and not isinstance(y, paddle.Tensor):
            y = paddle.full(shape=[1], dtype=x.dtype, fill_value=y)
        # rule 5: x is scalar, y is Tensor
        elif not isinstance(x, paddle.Tensor) and isinstance(y, paddle.Tensor):
            x = paddle.full(shape=[1], dtype=y.dtype, fill_value=x)

        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)

    # rule 1 : avoid numpy.ndarray
    if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
        raise TypeError(
            "divide(): arguments must be Tensor or scalar, not numpy.ndarray.")
    # rule 2: both the inputs are not Tensor
    elif not isinstance(x, Variable) and not isinstance(y, Variable):
        x = paddle.fill_constant(
            shape=[1], dtype=paddle.get_default_dtype(), value=x)
        y = paddle.fill_constant(
            shape=[1], dtype=paddle.get_default_dtype(), value=y)
    # rule 3: both the inputs are Tensor
    elif isinstance(x, Variable) and isinstance(y, Variable):
        if y.dtype != x.dtype:
            raise TypeError(
                "divide(): argument position 1 and argument position 2 must have the same dtype."
                "But x is {}, y is {}".format(x.dtype, y.dtype))
    # rule 4: x is Tensor, y is scalar
    elif isinstance(x, Variable) and not isinstance(y, Variable):
        y = paddle.fill_constant(shape=[1], dtype=x.dtype, value=y)
    # rule 5: x is scalar, y is Tensor
    elif not isinstance(x, Variable) and isinstance(y, Variable):
        x = paddle.fill_constant(shape=[1], dtype=y.dtype, value=x)

    return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -396,9 +516,43 @@ def remainder(x, y, name=None):
    op_type = 'elementwise_mod'
    axis = -1
    if in_dygraph_mode():
        # rule 1 : avoid numpy.ndarray
        if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
            raise TypeError(
                "remainder(): arguments must be Tensor or scalar, not numpy.ndarray."
            )
        elif not isinstance(x, paddle.Tensor):
            raise TypeError(
                "remainder(): arguments position 1 must be Tensor, not {}".
                format(type(x)))
        # rule 3: both the inputs are Tensor
        elif isinstance(y, paddle.Tensor):
            if y.dtype != x.dtype:
                raise TypeError(
                    "remainder(): argument position 1 and argument position 2 must have the same dtype."
                    "But x is {}, y is {}".format(x.dtype, y.dtype))
        # rule 4: x is Tensor, y is scalar
        elif not isinstance(y, paddle.Tensor):
            y = paddle.full(shape=[1], dtype=x.dtype, fill_value=y)

        return _elementwise_op_in_dygraph(x, y, axis=axis, op_name=op_type)

    # rule 1 : avoid numpy.ndarray
    if isinstance(x, numpy.ndarray) or isinstance(y, numpy.ndarray):
        raise TypeError(
            "remainder(): arguments must be Tensor or scalar, not numpy.ndarray."
        )
    elif not isinstance(x, Variable):
        raise TypeError(
            "remainder(): arguments position 1 must be Tensor, not {}".format(
                type(x)))
    # rule 3: both the inputs are Tensor
    elif isinstance(y, Variable):
        if y.dtype != x.dtype:
            raise TypeError(
                "remainder(): argument position 1 and argument position 2 must have the same dtype."
                "But x is {}, y is {}".format(x.dtype, y.dtype))
    # rule 4: x is Tensor, y is scalar
    elif not isinstance(y, paddle.Tensor):
        y = paddle.fill_constant(shape=[1], dtype=x.dtype, value=y)

    return _elementwise_op(LayerHelper(op_type, **locals()))
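Note the asymmetry with divide(): remainder() has no scalar-x rule, so the first argument must already be a Tensor and 3 % tensor raises TypeError, exactly as the unit tests exercise. A minimal dygraph sketch (illustrative only, not part of the diff):

import numpy as np
import paddle
import paddle.fluid as fluid

with fluid.dygraph.guard():
    y = paddle.to_tensor(np.array([2, 3, 4]).astype('int32'))
    print((y % 2).numpy())      # rule 4: scalar divisor is broadcast -> [0 1 0]
    try:
        paddle.remainder(3, y)  # argument position 1 must be Tensor
    except TypeError as err:
        print(err)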