Commit 9f731a60
Authored Jan 19, 2018 by Yang Yu
Add math operator patches
Users can use `a+b`, `a*10`.
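As a quick illustration of what this commit enables, here is a minimal sketch of a program using the patched operators. It is illustrative only, not part of the commit; the variable names are mine, while the fluid calls (fluid.layers.data, fluid.Executor, exe.run) follow the usage in the test file below.

    import numpy
    import paddle.v2.fluid as fluid

    a = fluid.layers.data(name="a", shape=[1])
    # With the patch applied, plain Python operators append graph ops:
    b = a + 10      # scalar is promoted to a filled tensor, then elementwise_add
    c = 0.5 * b     # reflected operand order works too (__rmul__)

    exe = fluid.Executor(fluid.CPUPlace())
    a_np = numpy.random.random(size=[4, 1]).astype('float32')
    c_np, = exe.run(fluid.default_main_program(),
                    feed={"a": a_np},
                    fetch_list=[c])
    print(numpy.allclose(0.5 * (a_np + 10), c_np))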
Parent: c73f00fe
Showing 4 changed files with 347 additions and 0 deletions (+347 −0)
python/paddle/v2/fluid/__init__.py (+2, −0)
python/paddle/v2/fluid/layers/__init__.py (+3, −0)
python/paddle/v2/fluid/layers/math_op_patch.py (+161, −0)
python/paddle/v2/fluid/tests/test_math_op_patch.py (+181, −0)
python/paddle/v2/fluid/__init__.py
@@ -36,6 +36,7 @@ import clip
 from memory_optimization_transpiler import memory_optimize

+Tensor = LoDTensor
 __all__ = framework.__all__ + executor.__all__ + [
     'io',
     'initializer',
@@ -93,4 +94,5 @@ def __bootstrap__():
     core.init_devices()
+    layers.monkey_patch_variable()

 __bootstrap__()
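Note that `monkey_patch_variable()` is invoked from `__bootstrap__()`, which runs at import time, so the operators are available without any explicit setup. A quick sanity check, as a hypothetical snippet that is not part of the commit (it assumes `Variable` is reachable as `fluid.framework.Variable`):

    import paddle.v2.fluid as fluid

    # Importing the package ran __bootstrap__(), which called
    # layers.monkey_patch_variable(); Variable now carries the operators.
    assert hasattr(fluid.framework.Variable, '__add__')
    assert hasattr(fluid.framework.Variable, '__rdiv__')
    assert hasattr(fluid.framework.Variable, 'astype')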
python/paddle/v2/fluid/layers/__init__.py
@@ -23,6 +23,8 @@ import control_flow
 from control_flow import *
 import device
 from device import *
+import math_op_patch
+from math_op_patch import *

 __all__ = []
 __all__ += nn.__all__
@@ -31,3 +33,4 @@ __all__ += tensor.__all__
 __all__ += control_flow.__all__
 __all__ += ops.__all__
 __all__ += device.__all__
+__all__ += math_op_patch.__all__
python/paddle/v2/fluid/layers/math_op_patch.py
new file mode 100644
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..framework import Variable, unique_name
from ..registry import OpProtoHolder

__all__ = ['monkey_patch_variable']


def monkey_patch_variable():
    def new_name():
        return unique_name("tmp")

    def safe_get_dtype(var):
        try:
            dtype = var.dtype
        except:
            raise ValueError("Cannot get data type from %s" % var.name)
        return dtype

    def create_scalar(block, value, dtype):
        value = float(value)
        tmp_name = new_name()
        var = block.create_var(name=tmp_name, shape=[1], dtype=dtype)
        block.append_op(
            type="fill",
            outputs={"Out": [var]},
            attrs={"value": [value],
                   "shape": [1],
                   "dtype": dtype})
        return var

    def create_tensor(block, value, dtype, shape):
        value = float(value)
        tmp_name = new_name()
        var = block.create_var(name=tmp_name, shape=shape, dtype=dtype)
        block.append_op(
            type="fill_constant",
            outputs={'Out': [var]},
            attrs={'dtype': var.dtype,
                   'shape': shape,
                   'value': value})
        return var

    def create_tensor_with_batchsize(ref_var, value, dtype):
        assert isinstance(ref_var, Variable)
        value = float(value)
        tmp_name = new_name()
        var = ref_var.block.create_var(name=tmp_name, dtype=dtype)
        ref_var.block.append_op(
            type='fill_constant_batch_size_like',
            outputs={'Out': [var]},
            inputs={'Input': [ref_var]},
            attrs={'shape': ref_var.shape,
                   'value': value})
        return var

    def astype(self, dtype):
        """
        Cast a variable to a data type.

        NOTE: The variable must be a Tensor.

        Args:
            self(Variable): The source variable
            dtype: The target dtype

        Returns:
            Variable with new dtype
        """
        tmp_name = new_name()
        out = self.block.create_var(name=tmp_name, dtype=dtype)
        self.block.append_op(
            type="cast",
            inputs={"X": [self]},
            outputs={"Out": [out]},
            attrs={"in_dtype": self.dtype,
                   "out_dtype": out.dtype})
        return out

    def _elemwise_method_creator_(method_name, op_type, reverse=False):
        def __impl__(self, other_var):
            lhs_dtype = safe_get_dtype(self)

            if not isinstance(other_var, Variable):
                if reverse:
                    has_batch_size = False
                    for elem in self.shape:
                        if elem < 0:
                            has_batch_size = True
                            break
                    if not has_batch_size:
                        other_var = create_tensor(
                            self.block,
                            other_var,
                            dtype=lhs_dtype,
                            shape=self.shape)
                    else:
                        other_var = create_tensor_with_batchsize(
                            self, other_var, lhs_dtype)
                else:
                    # add fill_op to self.block
                    other_var = create_scalar(
                        self.block, value=other_var, dtype=lhs_dtype)

            rhs_dtype = safe_get_dtype(other_var)
            if lhs_dtype != rhs_dtype:
                other_var = astype(other_var, lhs_dtype)
            if reverse:
                tmp = self
                self = other_var
                other_var = tmp

            tmp_name = new_name()
            out = self.block.create_var(name=tmp_name, dtype=lhs_dtype)
            self.block.append_op(
                type=op_type,
                inputs={'X': [self],
                        'Y': [other_var]},
                outputs={'Out': out})
            return out

        comment = OpProtoHolder.instance().get_op_proto(op_type).comment

        __impl__.__doc__ = """
        {0}
        Args:
            self(Variable): left hand variable
            other_var(Variable|float|int): right hand variable

        Returns:
            Variable
        """.format(comment)
        __impl__.__name__ = method_name
        return __impl__

    # inject methods
    for method_name, op_type, reverse in (
        ("__add__", "elementwise_add", False),
            # a+b == b+a. Do not need to reverse explicitly
        ("__radd__", "elementwise_add", False),
        ("__sub__", "elementwise_sub", False),
        ("__rsub__", "elementwise_sub", True),
        ("__mul__", "elementwise_mul", False),
            # a*b == b*a. Do not need to reverse explicitly
        ("__rmul__", "elementwise_mul", False),
        ("__div__", "elementwise_div", False),
        ("__rdiv__", "elementwise_div", True)):
        setattr(Variable, method_name,
                _elemwise_method_creator_(method_name, op_type, reverse))

    Variable.astype = astype
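For readers unfamiliar with the `reverse` flag above: Python evaluates `10 - a` by calling `a.__rsub__(10)`, so the patched method must materialize the scalar as a tensor and swap it into the left-hand slot before emitting `elementwise_sub`. A plain-Python sketch of the same reflected-operator pattern, independent of Paddle and for illustration only:

    class Value(object):
        """Toy wrapper mimicking how the patch handles `scalar - variable`."""

        def __init__(self, v):
            self.v = v

        def __sub__(self, other):
            # Promote a raw number to Value, then subtract normally.
            if not isinstance(other, Value):
                other = Value(float(other))
            return Value(self.v - other.v)

        def __rsub__(self, other):
            # Called for `scalar - value`: promote, then swap operands,
            # just as __impl__ does when reverse=True.
            return Value(float(other)).__sub__(self)

    print((Value(3.0) - 1).v)   # 2.0, via __sub__
    print((10 - Value(3.0)).v)  # 7.0, via __rsub__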
python/paddle/v2/fluid/tests/test_math_op_patch.py
new file mode 100644
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

import decorators
import paddle.v2.fluid as fluid
import numpy


class TestMathOpPatches(unittest.TestCase):
    @decorators.prog_scope()
    def test_add_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = a + 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(a_np + 10, b_np))

    @decorators.prog_scope()
    def test_radd_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 + a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(a_np + 10, b_np))

    @decorators.prog_scope()
    def test_sub_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = a - 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(a_np - 10, b_np))

    @decorators.prog_scope()
    def test_rsub_scalar(self):
        # `10 - a` dispatches to Variable.__rsub__
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 - a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(10 - a_np, b_np))

    @decorators.prog_scope()
    def test_mul_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = a * 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(a_np * 10, b_np))

    @decorators.prog_scope()
    def test_rmul_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 * a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(10 * a_np, b_np))

    @decorators.prog_scope()
    def test_div_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = a / 10
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(a_np / 10, b_np))

    @decorators.prog_scope()
    def test_rdiv_scalar(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = 10 / a
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        # shift away from zero so the division stays well conditioned
        a_np = numpy.random.random(size=[10, 1]).astype('float32') + 1e-2
        b_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np},
                       fetch_list=[b])
        self.assertTrue(numpy.allclose(10 / a_np, b_np))

    @decorators.prog_scope()
    def test_div_two_tensor(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a / b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = numpy.random.random(size=[10, 1]).astype('float32') + 1e-2
        c_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np,
                             'b': b_np},
                       fetch_list=[c])
        self.assertTrue(numpy.allclose(a_np / b_np, c_np))

    @decorators.prog_scope()
    def test_mul_two_tensor(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a * b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = numpy.random.random(size=[10, 1]).astype('float32')
        c_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np,
                             'b': b_np},
                       fetch_list=[c])
        self.assertTrue(numpy.allclose(a_np * b_np, c_np))

    @decorators.prog_scope()
    def test_add_two_tensor(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a + b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = numpy.random.random(size=[10, 1]).astype('float32')
        c_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np,
                             'b': b_np},
                       fetch_list=[c])
        self.assertTrue(numpy.allclose(a_np + b_np, c_np))

    @decorators.prog_scope()
    def test_sub_two_tensor(self):
        a = fluid.layers.data(name="a", shape=[1])
        b = fluid.layers.data(name="b", shape=[1])
        c = a - b
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        a_np = numpy.random.random(size=[10, 1]).astype('float32')
        b_np = numpy.random.random(size=[10, 1]).astype('float32')
        c_np = exe.run(fluid.default_main_program(),
                       feed={"a": a_np,
                             'b': b_np},
                       fetch_list=[c])
        self.assertTrue(numpy.allclose(a_np - b_np, c_np))


if __name__ == '__main__':
    unittest.main()