PaddlePaddle / Paddle
Commit e8cb97b8

Authored Feb 24, 2018 by Yu Yang

Moving unique_name to python

* Add reset and guard to unique_name

Parent: 78cc64a5
Showing 11 changed files with 134 additions and 86 deletions.
paddle/fluid/pybind/pybind.cc                   +0   -6
python/paddle/v2/fluid/__init__.py              +6   -20
python/paddle/v2/fluid/backward.py              +2   -1
python/paddle/v2/fluid/evaluator.py             +3   -2
python/paddle/v2/fluid/framework.py             +2   -21
python/paddle/v2/fluid/layer_helper.py          +17  -12
python/paddle/v2/fluid/layers/control_flow.py   +20  -16
python/paddle/v2/fluid/layers/device.py         +2   -1
python/paddle/v2/fluid/layers/math_op_patch.py  +1   -1
python/paddle/v2/fluid/optimizer.py             +7   -6
python/paddle/v2/fluid/unique_name.py           +74  -0
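In short, the commit replaces the process-wide C++ counter exposed as core.unique_integer (and wrapped by framework.unique_name) with a pure-Python unique_name module, so every call site below changes from unique_name(prefix) to unique_name.generate(prefix). A minimal before/after sketch, assuming the fluid package is on the path:

    # Before this commit: the counter lived in C++ and could not be reset.
    #   from framework import unique_name
    #   unique_name("fc")                 # "fc_0", backed by core.unique_integer

    # After this commit: same "prefix_N" naming scheme, now a Python module.
    import unique_name
    print(unique_name.generate("fc"))     # "fc_0"
    print(unique_name.generate("fc"))     # "fc_1" -- per-prefix counter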
paddle/fluid/pybind/pybind.cc

@@ -48,11 +48,6 @@ PYBIND11_MAKE_OPAQUE(paddle::framework::LoDTensorArray);
 namespace paddle {
 namespace pybind {
-static size_t UniqueIntegerGenerator(const std::string &prefix) {
-  static std::unordered_map<std::string, std::atomic<size_t>> generators;
-  return generators[prefix].fetch_add(1);
-}
-
 bool IsCompiledWithCUDA() {
 #ifndef PADDLE_WITH_CUDA
   return false;
@@ -409,7 +404,6 @@ All parameter, weight, gradient are variables in Paddle.
            (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) &
                Executor::Run);
-  m.def("unique_integer", UniqueIntegerGenerator);
   m.def("init_gflags", framework::InitGflags);
   m.def("init_glog", framework::InitGLOG);
   m.def("init_devices", &framework::InitDevices);
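The deleted UniqueIntegerGenerator kept its counters in a function-local static map of std::atomic<size_t>, so they lived for the whole process and could never be rewound. Making the generator an ordinary Python object is what enables the reset/guard behavior named in the commit message. A small sketch, assuming the new module:

    import unique_name

    print(unique_name.generate("w"))   # "w_0"
    print(unique_name.generate("w"))   # "w_1"

    unique_name.switch()               # install a fresh UniqueNameGenerator
    print(unique_name.generate("w"))   # "w_0" again: counters were reset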
python/paddle/v2/fluid/__init__.py

@@ -39,30 +39,16 @@ from concurrency import (Go, make_channel, channel_send, channel_recv,
 import clip
 from memory_optimization_transpiler import memory_optimize
 import profiler
+import unique_name

 Tensor = LoDTensor

 __all__ = framework.__all__ + executor.__all__ + concurrency.__all__ + [
-    'io',
-    'initializer',
-    'layers',
-    'nets',
-    'optimizer',
-    'learning_rate_decay',
-    'backward',
-    'regularizer',
-    'LoDTensor',
-    'CPUPlace',
-    'CUDAPlace',
-    'Tensor',
-    'ParamAttr',
-    'WeightNormParamAttr',
-    'DataFeeder',
-    'clip',
-    'SimpleDistributeTranspiler',
-    'DistributeTranspiler',
-    'memory_optimize',
-    'profiler',
+    'io', 'initializer', 'layers', 'nets', 'optimizer', 'learning_rate_decay',
+    'backward', 'regularizer', 'LoDTensor', 'CPUPlace', 'CUDAPlace', 'Tensor',
+    'ParamAttr', 'WeightNormParamAttr', 'DataFeeder', 'clip',
+    'SimpleDistributeTranspiler', 'DistributeTranspiler', 'memory_optimize',
+    'profiler', 'unique_name'
 ]
python/paddle/v2/fluid/backward.py

@@ -16,6 +16,7 @@ from paddle.v2.fluid import framework as framework
 from . import core
 import collections
 import copy
+import unique_name

 __all__ = [
     'append_backward',
@@ -388,7 +389,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map):
         for name in op_desc.output_arg_names():
             if block.desc.find_var(name.encode("ascii")):
-                new_name = "%s_%s" % (name, core.unique_integer(name))
+                new_name = unique_name.generate(name)
                 op_desc.rename_output(name, new_name)
                 var_map[name] = new_name
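Note that both the old and the new call key the counter on the full gradient name, so distinct gradients count independently. A sketch of how a collision would resolve, with hypothetical names but the same mechanism as _rename_grad_ above:

    import unique_name

    print(unique_name.generate("x@GRAD"))   # "x@GRAD_0"
    print(unique_name.generate("y@GRAD"))   # "y@GRAD_0" (separate key)
    print(unique_name.generate("x@GRAD"))   # "x@GRAD_1"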
python/paddle/v2/fluid/evaluator.py

@@ -15,7 +15,8 @@
 import numpy as np

 import layers
-from framework import Program, unique_name, Variable, program_guard
+from framework import Program, Variable, program_guard
+import unique_name
 from layer_helper import LayerHelper

 __all__ = [
@@ -96,7 +97,7 @@ class Evaluator(object):
         """
         state = self.helper.create_variable(
-            name="_".join([unique_name(self.helper.name), suffix]),
+            name="_".join([unique_name.generate(self.helper.name), suffix]),
             persistable=True,
             dtype=dtype,
             shape=shape)
python/paddle/v2/fluid/framework.py

@@ -20,6 +20,7 @@ import numpy as np

 import proto.framework_pb2 as framework_pb2
 from . import core
+import unique_name

 __all__ = [
     'Block',
@@ -47,20 +48,6 @@ def grad_var_name(var_name):
     return var_name + GRAD_VAR_SUFFIX


-def unique_name(prefix):
-    """
-    Generate unique names with prefix
-
-    Args:
-        prefix(str): The prefix of return string
-
-    Returns(str): A unique string with the prefix
-    """
-    uid = core.unique_integer(prefix)  # unique during whole process.
-    return "_".join([prefix, str(uid)])
-
-
 def convert_np_dtype_to_dtype_(np_dtype):
     """
     Convert the data type in numpy to the data type in Paddle
@@ -175,7 +162,7 @@ class Variable(object):
         self.error_clip = error_clip
         if name is None:
-            name = Variable._unique_var_name_()
+            name = unique_name.generate('_generated_var')
         is_new_var = False
         self.desc = self.block.desc.find_var(name)
@@ -303,12 +290,6 @@ class Variable(object):
     def type(self):
         return self.desc.type()

-    @staticmethod
-    def _unique_var_name_():
-        prefix = "_generated_var"
-        uid = core.unique_integer(prefix)  # unique during whole process.
-        return "_".join([prefix, str(uid)])
-
     def set_error_clip(self, error_clip):
         self.error_clip = error_clip
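With Variable._unique_var_name_ removed, anonymous variables also draw their names from the shared generator. A quick sketch of what Variable.__init__ now produces when no name is given:

    import unique_name

    # Mirrors the call in Variable.__init__ above.
    print(unique_name.generate('_generated_var'))   # "_generated_var_0"
    print(unique_name.generate('_generated_var'))   # "_generated_var_1"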
python/paddle/v2/fluid/layer_helper.py

@@ -15,8 +15,8 @@
 import copy
 import itertools

-from framework import Variable, Parameter, default_main_program, default_startup_program, \
-    unique_name, dtype_is_floating
+from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating
+import unique_name
 from paddle.v2.fluid.initializer import Constant, Xavier
 from param_attr import ParamAttr, WeightNormParamAttr
@@ -27,7 +27,7 @@ class LayerHelper(object):
         self.layer_type = layer_type
         name = self.kwargs.get('name', None)
         if name is None:
-            self.kwargs['name'] = unique_name(self.layer_type)
+            self.kwargs['name'] = unique_name.generate(self.layer_type)

     @property
     def name(self):
@@ -117,17 +117,20 @@ class LayerHelper(object):
                       block=self.startup_program.global_block()):
         if out is None:
             out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_norm'])),
                 dtype=dtype,
                 persistable=False)
         abs_out = block.create_var(
-            name=unique_name(".".join([self.name, 'weight_norm_abs'])),
+            name=unique_name.generate(".".join(
+                [self.name, 'weight_norm_abs'])),
             dtype=dtype,
             persistable=False)
         block.append_op(
             type='abs', inputs={'X': x}, outputs={'Out': abs_out})
         pow_out = block.create_var(
-            name=unique_name(".".join([self.name, 'weight_norm_pow'])),
+            name=unique_name.generate(".".join(
+                [self.name, 'weight_norm_pow'])),
             dtype=dtype,
             persistable=False)
         block.append_op(
@@ -136,7 +139,8 @@ class LayerHelper(object):
             outputs={'Out': pow_out},
             attrs={'factor': float(p)})
         sum_out = block.create_var(
-            name=unique_name(".".join([self.name, 'weight_norm_sum'])),
+            name=unique_name.generate(".".join(
+                [self.name, 'weight_norm_sum'])),
             dtype=dtype,
             persistable=False)
         block.append_op(
@@ -161,7 +165,7 @@ class LayerHelper(object):
                       block=self.startup_program.global_block()):
         if out is None:
             out = block.create_var(
-                name=unique_name(".".join(
+                name=unique_name.generate(".".join(
                     [self.name, 'weight_norm_reshape'])),
                 dtype=dtype,
                 persistable=False)
@@ -178,7 +182,7 @@ class LayerHelper(object):
                       block=self.startup_program.global_block()):
         if out is None:
             out = block.create_var(
-                name=unique_name(".".join(
+                name=unique_name.generate(".".join(
                     [self.name, 'weight_norm_transpose'])),
                 dtype=dtype,
                 persistable=False)
@@ -196,7 +200,8 @@ class LayerHelper(object):
         """Computes the norm over all dimensions except dim"""
         if out is None:
             out = block.create_var(
-                name=unique_name(".".join([self.name, 'weight_norm_norm'])),
+                name=unique_name.generate(".".join(
+                    [self.name, 'weight_norm_norm'])),
                 dtype=dtype,
                 persistable=False)
         if dim is None:
@@ -286,7 +291,7 @@ class LayerHelper(object):
         assert isinstance(attr, ParamAttr)
         suffix = 'b' if is_bias else 'w'
         if attr.name is None:
-            attr.name = unique_name(".".join([self.name, suffix]))
+            attr.name = unique_name.generate(".".join([self.name, suffix]))
         if default_initializer is None and attr.initializer is None:
             if is_bias:
@@ -316,7 +321,7 @@ class LayerHelper(object):
     def create_tmp_variable(self, dtype, stop_gradient=False):
         return self.main_program.current_block().create_var(
-            name=unique_name(".".join([self.name, 'tmp'])),
+            name=unique_name.generate(".".join([self.name, 'tmp'])),
             dtype=dtype,
             persistable=False,
             stop_gradient=stop_gradient)
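Most user-visible fluid names originate in these LayerHelper call sites: the layer type keys the outer counter, and the resulting layer name is then dotted with a suffix for parameters and temporaries. A short sketch of the resulting scheme:

    import unique_name

    layer = unique_name.generate("fc")                        # "fc_0"
    weight = unique_name.generate(".".join([layer, "w"]))     # "fc_0.w_0"
    tmp = unique_name.generate(".".join([layer, "tmp"]))      # "fc_0.tmp_0"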
python/paddle/v2/fluid/layers/control_flow.py

@@ -428,7 +428,8 @@ class StaticRNN(object):
                 raise ValueError(
                     "if init is None, memory at least need shape and batch_ref")
             parent_block = self.parent_block()
-            var_name = unique_name("@".join([self.helper.name, "memory_boot"]))
+            var_name = unique_name.generate("@".join(
+                [self.helper.name, "memory_boot"]))
             boot_var = parent_block.create_var(
                 name=var_name,
                 shape=shape,
@@ -450,7 +451,7 @@ class StaticRNN(object):
             return self.memory(init=boot_var)
         else:
             pre_mem = self.helper.create_variable(
-                name=unique_name("@".join([self.helper.name, "mem"])),
+                name=unique_name.generate("@".join([self.helper.name, "mem"])),
                 dtype=init.dtype,
                 shape=init.shape)
             self.memories[pre_mem.name] = StaticRNNMemoryLink(
@@ -709,7 +710,7 @@ def lod_rank_table(x, level=0):
     helper = LayerHelper("lod_rank_table", **locals())
     table = helper.create_variable(
         type=core.VarDesc.VarType.LOD_RANK_TABLE,
-        name=unique_name("lod_rank_table"))
+        name=unique_name.generate("lod_rank_table"))
     helper.append_op(
         type='lod_rank_table',
         inputs={'X': x},
@@ -807,7 +808,7 @@ def lod_tensor_to_array(x, table):
     """
     helper = LayerHelper("lod_tensor_to_array", **locals())
     array = helper.create_variable(
-        name=unique_name("lod_tensor_to_array"),
+        name=unique_name.generate("lod_tensor_to_array"),
         type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
         dtype=x.dtype)
     helper.append_op(
@@ -1264,11 +1265,11 @@ class IfElse(object):
         if id(x) not in self.input_table:
             parent_block = self.parent_block()
             out_true = parent_block.create_var(
-                name=unique_name('ifelse_input' + self.helper.name),
+                name=unique_name.generate('ifelse_input' + self.helper.name),
                 dtype=x.dtype)
             out_false = parent_block.create_var(
-                name=unique_name('ifelse_input' + self.helper.name),
+                name=unique_name.generate('ifelse_input' + self.helper.name),
                 dtype=x.dtype)
             parent_block.append_op(
                 type='split_lod_tensor',
@@ -1310,7 +1311,8 @@ class IfElse(object):
                 raise TypeError("Each output should be a variable")
             # create outside tensor
             outside_out = parent_block.create_var(
-                name=unique_name("_".join([self.helper.name, 'output'])),
+                name=unique_name.generate("_".join(
+                    [self.helper.name, 'output'])),
                 dtype=each_out.dtype)
             out_table.append(outside_out)
@@ -1373,7 +1375,7 @@ class DynamicRNN(object):
         parent_block = self._parent_block_()
         if self.lod_rank_table is None:
             self.lod_rank_table = parent_block.create_var(
-                name=unique_name('lod_rank_table'),
+                name=unique_name.generate('lod_rank_table'),
                 type=core.VarDesc.VarType.LOD_RANK_TABLE)
             self.lod_rank_table.stop_gradient = True
             parent_block.append_op(
@@ -1381,7 +1383,8 @@ class DynamicRNN(object):
                 inputs={"X": x},
                 outputs={"Out": self.lod_rank_table})
             self.max_seq_len = parent_block.create_var(
-                name=unique_name('dynamic_rnn_max_seq_len'), dtype='int64')
+                name=unique_name.generate('dynamic_rnn_max_seq_len'),
+                dtype='int64')
             self.max_seq_len.stop_gradient = False
             parent_block.append_op(
                 type='max_sequence_len',
@@ -1395,7 +1398,7 @@ class DynamicRNN(object):
                 outputs={'Out': self.cond})

         input_array = parent_block.create_var(
-            name=unique_name('dynamic_rnn_input_array'),
+            name=unique_name.generate('dynamic_rnn_input_array'),
             type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
             dtype=x.dtype)
         self.input_array.append((input_array, x.dtype))
@@ -1416,7 +1419,7 @@ class DynamicRNN(object):
                 "static_input() must be called after step_input().")
         parent_block = self._parent_block_()
         x_reordered = parent_block.create_var(
-            name=unique_name("dynamic_rnn_static_input_reordered"),
+            name=unique_name.generate("dynamic_rnn_static_input_reordered"),
             type=core.VarDesc.VarType.LOD_TENSOR,
             dtype=x.dtype)
         parent_block.append_op(
@@ -1478,7 +1481,7 @@ class DynamicRNN(object):
                     'invoked before '
                     'memory(init=init, need_reordered=True, ...).')
                 init_reordered = parent_block.create_var(
-                    name=unique_name('dynamic_rnn_mem_init_reordered'),
+                    name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
                     type=core.VarDesc.VarType.LOD_TENSOR,
                     dtype=init.dtype)
                 parent_block.append_op(
@@ -1490,7 +1493,7 @@ class DynamicRNN(object):
                     outputs={'Out': [init_reordered]})
                 init_tensor = init_reordered
             mem_array = parent_block.create_var(
-                name=unique_name('dynamic_rnn_mem_array'),
+                name=unique_name.generate('dynamic_rnn_mem_array'),
                 type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                 dtype=init.dtype)
             parent_block.append_op(
@@ -1510,9 +1513,10 @@ class DynamicRNN(object):
             )
         parent_block = self._parent_block_()
         init = parent_block.create_var(
-            name=unique_name('mem_init'), dtype=dtype)
+            name=unique_name.generate('mem_init'), dtype=dtype)
         arr, dtype = self.input_array[0]
-        in0 = parent_block.create_var(name=unique_name('in0'), dtype=dtype)
+        in0 = parent_block.create_var(
+            name=unique_name.generate('in0'), dtype=dtype)
         parent_block.append_op(
             type='read_from_array',
             inputs={'X': [arr],
@@ -1551,7 +1555,7 @@ class DynamicRNN(object):
         parent_block = self._parent_block_()
         for each in outputs:
             outside_array = parent_block.create_var(
-                name=unique_name("_".join(
+                name=unique_name.generate("_".join(
                     [self.helper.name, "output_array", each.name])),
                 type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
                 dtype=each.dtype)
python/paddle/v2/fluid/layers/device.py

@@ -25,7 +25,8 @@ __all__ = ['get_places']

 @autodoc()
 def get_places(device_count=None, device_type=None):
     helper = LayerHelper('get_places', **locals())
-    out_places = helper.create_variable(name=unique_name(helper.name + ".out"))
+    out_places = helper.create_variable(
+        name=unique_name.generate(helper.name + ".out"))
     attrs = dict()
     if device_count is not None:
         attrs['device_count'] = int(device_count)
python/paddle/v2/fluid/layers/math_op_patch.py

@@ -21,7 +21,7 @@ __all__ = ['monkey_patch_variable']

 def monkey_patch_variable():
     def unique_tmp_name():
-        return unique_name("tmp")
+        return unique_name.generate("tmp")

     def safe_get_dtype(var):
         try:
python/paddle/v2/fluid/optimizer.py

@@ -17,7 +17,8 @@ from collections import defaultdict
 import framework
 import layers
 from backward import append_backward
-from framework import unique_name, program_guard
+from framework import program_guard
+import unique_name
 from initializer import Constant
 from layer_helper import LayerHelper
 from regularizer import append_regularization_ops
@@ -49,7 +50,7 @@ class Optimizer(object):
     def _create_global_learning_rate(self):
         if isinstance(self._global_learning_rate, float):
             self._global_learning_rate = layers.create_global_var(
-                name=unique_name("learning_rate"),
+                name=unique_name.generate("learning_rate"),
                 shape=[1],
                 value=float(self._global_learning_rate),
                 dtype='float32',
@@ -118,7 +119,7 @@ class Optimizer(object):
         assert isinstance(self.helper, LayerHelper)
         var = self.helper.create_global_variable(
-            name=unique_name(name),
+            name=unique_name.generate(name),
             persistable=True,
             dtype=dtype or param.dtype,
             type=param.type,
@@ -379,7 +380,7 @@ class AdamOptimizer(Optimizer):
         # Create beta1 and beta2 power tensors
         beta_shape = [1]
         self._beta1_pow_acc = self.helper.create_global_variable(
-            name=unique_name('beta1_pow_acc'),
+            name=unique_name.generate('beta1_pow_acc'),
             dtype='float32',
             shape=beta_shape,
             lod_level=0,
@@ -388,7 +389,7 @@ class AdamOptimizer(Optimizer):
             self._beta1_pow_acc, initializer=Constant(self._beta1))

         self._beta2_pow_acc = self.helper.create_global_variable(
-            name=unique_name('beta2_pow_acc'),
+            name=unique_name.generate('beta2_pow_acc'),
             dtype='float32',
             shape=beta_shape,
             lod_level=0,
@@ -481,7 +482,7 @@ class AdamaxOptimizer(Optimizer):
         # Create beta1 power accumulator tensor
         beta_shape = [1]
         self._beta1_pow_acc = self.helper.create_global_variable(
-            name=unique_name('beta1_pow_acc'),
+            name=unique_name.generate('beta1_pow_acc'),
             dtype='float32',
             shape=beta_shape,
             lod_level=0,
python/paddle/v2/fluid/unique_name.py (new file, mode 100644)

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections
import contextlib
import sys

__all__ = ['generate', 'switch', 'guard', 'UniqueNameGenerator']


class UniqueNameGenerator(object):
    """
    Generate unique name with prefix.

    Args:
        prefix(str): The generated name prefix. All generated name will be
                     started with this prefix.
    """

    def __init__(self, prefix=None):
        self.ids = collections.defaultdict(int)
        if prefix is None:
            prefix = ""
        self.prefix = prefix

    def __call__(self, key):
        """
        Generate unique names with prefix

        Args:
            key(str): The key of return string.

        Returns(str): A unique string with the prefix
        """
        tmp = self.ids[key]
        self.ids[key] += 1
        return self.prefix + "_".join([key, str(tmp)])


generator = UniqueNameGenerator()


def generate(prefix):
    return generator(prefix)


def switch(new_generator=None):
    global generator
    old = generator
    if new_generator is None:
        generator = UniqueNameGenerator()
    else:
        generator = new_generator
    return old


@contextlib.contextmanager
def guard(new_generator=None):
    if isinstance(new_generator, basestring):
        new_generator = UniqueNameGenerator(new_generator)
    old = switch(new_generator)
    yield
    switch(old)
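The guard context manager is the other half of the commit message: it swaps in a fresh (or prefixed) generator for the duration of a block and restores the old one afterwards, so independently built programs can produce identical names. A small usage sketch:

    import unique_name

    with unique_name.guard():
        a = unique_name.generate("fc")      # "fc_0" -- counters start fresh

    with unique_name.guard():
        b = unique_name.generate("fc")      # "fc_0" again, so a == b

    # A string argument becomes the prefix of a new UniqueNameGenerator:
    with unique_name.guard("inference_"):
        c = unique_name.generate("fc")      # "inference_fc_0"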