Unverified commit 0ac8c74e
Authored Nov 27, 2017 by Yu Yang; committed via GitHub on Nov 27, 2017
Unify fluid submodules to fluid module (#5924)
Change the book examples to just use `import fluid`, not submodules
Parent: e6546baa
Changes: 19 changed files with 381 additions and 425 deletions (+381, -425)
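As a quick sketch of what the unification means for user code (module paths and layer names are taken from the diffs below; the snippet itself is illustrative, not part of the commit), a book example now needs only the single top-level import:

    import paddle.v2 as paddle
    import paddle.v2.fluid as fluid

    # Previously a book example needed half a dozen submodule imports:
    #   import paddle.v2.fluid.core as core
    #   import paddle.v2.fluid.layers as layers
    #   from paddle.v2.fluid.executor import Executor
    #   from paddle.v2.fluid.optimizer import SGDOptimizer
    # Now everything hangs off the single `fluid` module:
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)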
python/paddle/v2/fluid/__init__.py (+41, -11)
python/paddle/v2/fluid/evaluator.py (+3, -4)
python/paddle/v2/fluid/executor.py (+4, -2)
python/paddle/v2/fluid/framework.py (+4, -4)
python/paddle/v2/fluid/initializer.py (+18, -5)
python/paddle/v2/fluid/layer_helper.py (+5, -6)
python/paddle/v2/fluid/layers.py (+16, -20)
python/paddle/v2/fluid/nets.py (+1, -1)
python/paddle/v2/fluid/optimizer.py (+28, -15)
python/paddle/v2/fluid/regularizer.py (+15, -4)
python/paddle/v2/fluid/tests/book/test_fit_a_line.py (+22, -35)
python/paddle/v2/fluid/tests/book/test_image_classification_train.py (+33, -62)
python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py (+34, -38)
python/paddle/v2/fluid/tests/book/test_recognize_digits_conv.py (+19, -31)
python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py (+33, -44)
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py (+24, -30)
python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py (+27, -33)
python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py (+24, -25)
python/paddle/v2/fluid/tests/book/test_word2vec.py (+30, -55)
python/paddle/v2/fluid/__init__.py

--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
-import sys
-import core
-
-__all__ = ['proto']
-
-argv = []
-if core.is_compile_gpu():
-    argv = list(sys.argv) + [
-        "--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory"
-    ]
-else:
-    argv = list(sys.argv) + ["--tryfromenv=use_pinned_memory"]
-core.init_gflags(argv)
+# import all class inside framework into fluid module
+import framework
+from framework import *
+# import all class inside executor into fluid module
+import executor
+from executor import *
+import io
+import evaluator
+import initializer
+import layers
+import nets
+import optimizer
+import backward
+import regularizer
+from core import LoDTensor, CPUPlace, GPUPlace
+
+Tensor = LoDTensor
+
+__all__ = framework.__all__ + executor.__all__ + [
+    'io', 'initializer', 'layers', 'nets', 'optimizer', 'backward',
+    'regularizer', 'LoDTensor', 'CPUPlace', 'GPUPlace', 'Tensor'
+]
+
+
+def __read_gflags_from_env__():
+    """
+    Enable reading gflags from environment variables.
+    Returns:
+        None
+    """
+    import sys
+    import core
+    read_env_flags = ['use_pinned_memory']
+    if core.is_compile_gpu():
+        read_env_flags.append('fraction_of_gpu_memory_to_use')
+    core.init_gflags(sys.argv +
+                     ["--tryfromenv=" + ",".join(read_env_flags)])
+
+
+__read_gflags_from_env__()
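`__read_gflags_from_env__` runs at import time, so flag values can come from the environment through gflags' `--tryfromenv`. To my understanding gflags looks such flags up as `FLAGS_<name>` environment variables; treat the variable names in this sketch as an assumption rather than something the diff states:

    import os

    # Assumed gflags convention: --tryfromenv=foo reads env var FLAGS_foo.
    # These must be set before the first `import paddle.v2.fluid`, which
    # calls __read_gflags_from_env__() during module import.
    os.environ["FLAGS_use_pinned_memory"] = "true"
    os.environ["FLAGS_fraction_of_gpu_memory_to_use"] = "0.5"  # GPU builds only

    import paddle.v2.fluid as fluid  # gflags are initialized here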
python/paddle/v2/fluid/evaluator.py

--- a/python/paddle/v2/fluid/evaluator.py
+++ b/python/paddle/v2/fluid/evaluator.py
 import numpy as np
-import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.framework import Program, unique_name, \
-    Variable
-from paddle.v2.fluid.layer_helper import LayerHelper
+import layers
+from framework import Program, unique_name, Variable
+from layer_helper import LayerHelper

 __all__ = ['Accuracy']
...
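The `Accuracy` evaluator exported here is what the rewritten book tests drive as `fluid.evaluator.Accuracy`. A hedged sketch of the pattern visible in the test diffs below (`predict`, `label`, and `exe` are assumed to come from a network and executor built as in those tests):

    import paddle.v2.fluid as fluid

    # `predict` and `label` are assumed layer outputs, `exe` a fluid.Executor.
    accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
    accuracy.reset(exe)            # clear accumulated state once per pass
    # per batch the tests fetch [avg_cost] + accuracy.metrics in exe.run(...)
    pass_acc = accuracy.eval(exe)  # aggregated accuracy over the pass so far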
python/paddle/v2/fluid/executor.py

--- a/python/paddle/v2/fluid/executor.py
+++ b/python/paddle/v2/fluid/executor.py
 import numpy as np
-import paddle.v2.fluid.core as core
-from paddle.v2.fluid.framework import Block, Program, g_main_program
+from . import core
+from framework import Program, g_main_program
+
+__all__ = ['Executor', 'g_scope']

 g_scope = core.Scope()
...
python/paddle/v2/fluid/framework.py

--- a/python/paddle/v2/fluid/framework.py
+++ b/python/paddle/v2/fluid/framework.py
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
 import collections
 import numpy as np
-import copy
+from . import core
+import proto.framework_pb2 as framework_pb2

 __all__ = [
     'Block', 'Variable', 'Program', 'Operator', 'default_startup_program',
-    'default_main_program'
+    'default_main_program', 'g_startup_program', 'g_main_program'
 ]
...
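The two new `__all__` entries matter for the book tests below: with `from framework import *` in the new `__init__.py`, `g_startup_program`/`g_main_program` (alongside the already-exported `default_*_program` helpers) become reachable as `fluid.*`. A minimal sketch of the setup pattern every updated test uses:

    import paddle.v2.fluid as fluid

    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    # Run the startup program once to initialize parameters...
    exe.run(fluid.default_startup_program())
    # ...then run the main program per batch (feed/fetch omitted here).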
python/paddle/v2/fluid/initializer.py

--- a/python/paddle/v2/fluid/initializer.py
+++ b/python/paddle/v2/fluid/initializer.py
-import paddle.v2.fluid.framework as framework
+import framework
 import numpy as np

-__all__ = [
-    'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
-    'XavierInitializer'
-]
+__all__ = ['Constant', 'Uniform', 'Normal', 'Xavier']


 class Initializer(object):
...
@@ -368,3 +365,19 @@ class MSRAInitializer(Initializer):
             })
         var.op = op
         return op
+
+
+# We short the class name, since users will use the initializer with the package
+# name. The sample code:
+#
+# import paddle.fluid as fluid
+#
+# hidden = fluid.layers.fc(...,
+#                          param_attr=ParamAttr(fluid.initializer.Xavier()))
+#
+# It is no need to add an `Initializer` as the class suffix
+Constant = ConstantInitializer
+Uniform = UniformInitializer
+Normal = NormalInitializer
+Xavier = XavierInitializer
+MSRA = MSRAInitializer
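A hedged usage sketch of the short names (the `ParamAttr(...)` form in the comment above is the author's shorthand; the book tests in this same commit pass a plain dict for `param_attr`, which is what this sketch follows):

    import paddle.v2.fluid as fluid

    image = fluid.layers.data(name='x', shape=[784], dtype='float32')
    # Uniform is now an alias for UniformInitializer; the dict-style
    # param_attr mirrors the pre-change test_recognize_digits_mlp.py.
    hidden = fluid.layers.fc(
        input=image,
        size=128,
        act='relu',
        param_attr={'initializer': fluid.initializer.Uniform(low=-1.0, high=1.0)})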
python/paddle/v2/fluid/layer_helper.py

--- a/python/paddle/v2/fluid/layer_helper.py
+++ b/python/paddle/v2/fluid/layer_helper.py
 import copy
 import itertools

-from paddle.v2.fluid.framework import Variable, g_main_program, \
-    g_startup_program, unique_name, Program, dtype_is_floating
-from paddle.v2.fluid.initializer import ConstantInitializer, \
-    UniformInitializer, XavierInitializer
+from framework import Variable, g_main_program, \
+    g_startup_program, unique_name, dtype_is_floating
+from paddle.v2.fluid.initializer import Constant, Xavier


 class LayerHelper(object):
...
@@ -209,7 +208,7 @@ class LayerHelper(object):
     def _get_default_initializer(self, dtype):
         if dtype is None or dtype_is_floating(dtype) is True:
-            return XavierInitializer()
+            return Xavier()
         else:
             # For integer and boolean types, initialize with all zeros
-            return ConstantInitializer()
+            return Constant()
python/paddle/v2/fluid/layers.py

--- a/python/paddle/v2/fluid/layers.py
+++ b/python/paddle/v2/fluid/layers.py
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.proto.framework_pb2 as framework_pb2
-from paddle.v2.fluid.framework import OpProtoHolder, Variable, Program, \
-    Operator
-from paddle.v2.fluid.initializer import ConstantInitializer, \
-    NormalInitializer, XavierInitializer
+from . import core
+import proto.framework_pb2 as framework_pb2
+from framework import OpProtoHolder, Variable, Program, Operator
+from initializer import Constant, Normal, Xavier
 from paddle.v2.fluid.layer_helper import LayerHelper, unique_name
 import re
 import cStringIO
...
@@ -58,10 +56,10 @@ def fc(input,
     """

     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     def _get_default_bias_initializer():
-        return ConstantInitializer()
+        return Constant()

     helper = LayerHelper('fc', **locals())
...
@@ -139,7 +137,7 @@ def embedding(input,
     """

     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     helper = LayerHelper('embedding', **locals())
     w = helper.create_parameter(
...
@@ -477,7 +475,7 @@ def linear_chain_crf(input,
                      main_program=None,
                      startup_program=None):
     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     helper = LayerHelper('linear_chain_crf', **locals())
     size = input.shape[1]
...
@@ -661,10 +659,10 @@ def sequence_conv(input,
     """

     def _get_default_bias_initializer():
-        return ConstantInitializer()
+        return Constant()

     def _get_default_param_initializer():
-        return XavierInitializer()
+        return Xavier()

     # FIXME(dzh) : want to unify the argument of python layer
     # function. So we ignore some unecessary attributes.
...
@@ -725,11 +723,11 @@ def conv2d(input,
     """

     def _get_default_bias_initializer():
-        return ConstantInitializer()
+        return Constant()

     def _get_default_param_initializer(filter_size, num_channels):
         std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
-        return NormalInitializer(0.0, std, 0)
+        return Normal(0.0, std, 0)

     helper = LayerHelper('conv2d', **locals())
     dtype = helper.input_dtype()
...
@@ -878,22 +876,20 @@ def batch_norm(input,
         attr=helper.param_attr,
         shape=param_shape,
         dtype=dtype,
-        initializer=ConstantInitializer(1.0))
+        initializer=Constant(1.0))
     bias = helper.create_parameter(
         attr=helper.param_attr,
         shape=param_shape,
         dtype=dtype,
-        initializer=ConstantInitializer(0.0))
+        initializer=Constant(0.0))

     mean = helper.create_global_variable(
         dtype=input.dtype, shape=param_shape, persistable=True)
     helper.set_variable_initializer(
-        var=mean, initializer=ConstantInitializer(0.0))
+        var=mean, initializer=Constant(0.0))

     variance = helper.create_global_variable(
         dtype=input.dtype, shape=param_shape, persistable=True)
     helper.set_variable_initializer(
-        var=variance, initializer=ConstantInitializer(1.0))
+        var=variance, initializer=Constant(1.0))

     # create output
     # mean and mean_out share the same memory
...
python/paddle/v2/fluid/nets.py

--- a/python/paddle/v2/fluid/nets.py
+++ b/python/paddle/v2/fluid/nets.py
-import paddle.v2.fluid.layers as layers
+import layers

 __all__ = ["simple_img_conv_pool", "sequence_conv_pool"]
...
python/paddle/v2/fluid/optimizer.py

--- a/python/paddle/v2/fluid/optimizer.py
+++ b/python/paddle/v2/fluid/optimizer.py
 from collections import defaultdict

-import paddle.v2.fluid.framework as framework
-from paddle.v2.fluid.framework import unique_name, Program
-from paddle.v2.fluid.backward import append_backward_ops
-from paddle.v2.fluid.initializer import ConstantInitializer
-from paddle.v2.fluid.regularizer import append_regularization_ops
-from paddle.v2.fluid.layer_helper import LayerHelper
+import framework
+from backward import append_backward_ops
+from framework import unique_name
+from initializer import Constant
+from layer_helper import LayerHelper
+from regularizer import append_regularization_ops

-__all__ = [
-    'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',
-    'AdamaxOptimizer', 'DecayedAdagradOptimizer'
-]
+__all__ = ['SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad']


 class Optimizer(object):
...
@@ -48,7 +45,7 @@ class Optimizer(object):
                 persistable=True)
             param_lr = param_lr * self._learning_rate
             self.helper.set_variable_initializer(
-                var=param_lr_var, initializer=ConstantInitializer(param_lr))
+                var=param_lr_var, initializer=Constant(param_lr))
             return param_lr_var

     def _create_accumulators(self, block, parameters):
...
@@ -96,7 +93,7 @@ class Optimizer(object):
             type=param.type,
             shape=param.shape)
         self.helper.set_variable_initializer(
-            var, initializer=ConstantInitializer(value=float(fill_value)))
+            var, initializer=Constant(value=float(fill_value)))
         self._accumulators[name][param.name] = var

     def _get_accumulator(self, name, param):
...
@@ -360,7 +357,7 @@ class AdamOptimizer(Optimizer):
             lod_level=0,
             persistable=True)
         self.helper.set_variable_initializer(
-            self._beta1_pow_acc, initializer=ConstantInitializer(self._beta1))
+            self._beta1_pow_acc, initializer=Constant(self._beta1))

         self._beta2_pow_acc = self.helper.create_global_variable(
             name=unique_name('beta2_pow_acc'),
...
@@ -370,7 +367,7 @@ class AdamOptimizer(Optimizer):
             persistable=True)
         self.helper.set_variable_initializer(
-            self._beta2_pow_acc, initializer=ConstantInitializer(self._beta2))
+            self._beta2_pow_acc, initializer=Constant(self._beta2))

         # Create accumulator tensors for first and second moments
         for p in parameters:
...
@@ -462,7 +459,7 @@ class AdamaxOptimizer(Optimizer):
             lod_level=0,
             persistable=True)
         self.helper.set_variable_initializer(
-            self._beta1_pow_acc, initializer=ConstantInitializer(self._beta1))
+            self._beta1_pow_acc, initializer=Constant(self._beta1))

         # Create accumulator tensors for first moment and infinity norm
         for p in parameters:
...
@@ -559,3 +556,19 @@ class DecayedAdagradOptimizer(Optimizer):
             attrs={"epsilon": self._epsilon})

         return decayed_adagrad_op
+
+
+# We short the class name, since users will use the optimizer with the package
+# name. The sample code:
+#
+# import paddle.fluid as fluid
+#
+# sgd = fluid.optimizer.SGD(...)
+#
+# It is no need to add an `Optimizer` as the class suffix
+SGD = SGDOptimizer
+Momentum = MomentumOptimizer
+Adagrad = AdagradOptimizer
+Adam = AdamOptimizer
+Adamax = AdamaxOptimizer
+DecayedAdagrad = DecayedAdagradOptimizer
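Usage of the shortened optimizer names, as the updated book tests below do (the network-building lines here are a minimal assumed context, mirroring test_fit_a_line.py):

    import paddle.v2.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    avg_cost = fluid.layers.mean(
        x=fluid.layers.square_error_cost(input=y_predict, label=y))

    # SGD is now an alias for SGDOptimizer.
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)  # appends backward and optimize ops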
python/paddle/v2/fluid/regularizer.py

--- a/python/paddle/v2/fluid/regularizer.py
+++ b/python/paddle/v2/fluid/regularizer.py
-import paddle.v2.fluid.framework as framework
+import framework

-__all__ = [
-    'append_regularization_ops', 'L2DecayRegularizer', 'L1DecayRegularizer'
-]
+__all__ = ['append_regularization_ops', 'L1Decay', 'L2Decay']


 def append_regularization_ops(parameters_and_grads):
...
@@ -139,3 +137,16 @@ class L1DecayRegularizer(WeightDecayRegularizer):
             attrs={"scale": self._regularization_coeff})

         return decay
+
+
+# We short the class name, since users will use the regulaizer with the package
+# name. The sample code:
+#
+# import paddle.fluid as fluid
+#
+# hidden = fluid.layers.fc(...,
+#                          param_attr=ParamAttr(fluid.regularizer.Xavier()))
+#
+# It is no need to add a `Regularizer` as the class suffix
+L1Decay = L1DecayRegularizer
+L2Decay = L2DecayRegularizer
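For an actual use of the short regularizer names, the updated MLP book test further down passes one through `param_attr`; a minimal sketch following it:

    import paddle.v2.fluid as fluid

    BATCH_SIZE = 128
    image = fluid.layers.data(name='x', shape=[784], dtype='float32')
    # L2Decay is now an alias for L2DecayRegularizer; the coefficient is
    # scaled by batch size exactly as in test_recognize_digits_mlp.py.
    param_attr = {
        'name': None,
        'regularization': fluid.regularizer.L2Decay(0.0005 * BATCH_SIZE)
    }
    hidden = fluid.layers.fc(
        input=image, size=128, act='relu', param_attr=param_attr)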
python/paddle/v2/fluid/tests/book/test_fit_a_line.py

--- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
+++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.io import save_persistables, load_persistables
-from paddle.v2.fluid.optimizer import SGDOptimizer
+import paddle.v2.fluid as fluid

-x = layers.data(name='x', shape=[13], dtype='float32')
-y_predict = layers.fc(input=x, size=1, act=None)
-y = layers.data(name='y', shape=[1], dtype='float32')
-cost = layers.square_error_cost(input=y_predict, label=y)
-avg_cost = layers.mean(x=cost)
+x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+y_predict = fluid.layers.fc(input=x, size=1, act=None)
+y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+avg_cost = fluid.layers.mean(x=cost)

-sgd_optimizer = SGDOptimizer(learning_rate=0.001)
-opts = sgd_optimizer.minimize(avg_cost)
+sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+sgd_optimizer.minimize(avg_cost)

 BATCH_SIZE = 20
...
@@ -26,32 +21,24 @@ train_reader = paddle.batch(
         paddle.dataset.uci_housing.train(), buf_size=500),
     batch_size=BATCH_SIZE)

-place = core.CPUPlace()
-exe = Executor(place)
+place = fluid.CPUPlace()
+exe = fluid.Executor(place)

-exe.run(framework.default_startup_program())
+exe.run(fluid.default_startup_program())

 PASS_NUM = 100
 for pass_id in range(PASS_NUM):
-    save_persistables(exe, "./fit_a_line.model/")
-    load_persistables(exe, "./fit_a_line.model/")
+    fluid.io.save_persistables(exe, "./fit_a_line.model/")
+    fluid.io.load_persistables(exe, "./fit_a_line.model/")
     for data in train_reader():
-        x_data = np.array(map(lambda x: x[0], data)).astype("float32")
-        y_data = np.array(map(lambda x: x[1], data)).astype("float32")
+        x_data = np.array(map(lambda _: _[0], data)).astype("float32")
+        y_data = np.array(map(lambda _: _[1], data)).astype("float32")

-        tensor_x = core.LoDTensor()
-        tensor_x.set(x_data, place)
-        # print tensor_x.get_dims()
-
-        tensor_y = core.LoDTensor()
-        tensor_y.set(y_data, place)
-        # print tensor_y.get_dims()
-        outs = exe.run(framework.default_main_program(),
-                       feed={'x': tensor_x,
-                             'y': tensor_y},
-                       fetch_list=[avg_cost])
-        out = np.array(outs[0])
+        avg_loss_value, = exe.run(fluid.default_main_program(),
+                                  feed={'x': x_data,
+                                        'y': y_data},
+                                  fetch_list=[avg_cost])

-        if out[0] < 10.0:
+        if avg_loss_value[0] < 10.0:
             exit(0)  # if avg cost less than 10.0, we think our code is good.
 exit(1)
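Worth noting in this diff: the rewritten test feeds numpy arrays straight to `Executor.run` instead of copying them into `core.LoDTensor` objects first, and unpacks the single fetched value with `avg_loss_value, = exe.run(...)`. A self-contained sketch of that style, reusing the network above with a hypothetical random batch:

    import numpy as np
    import paddle.v2.fluid as fluid

    # The fit-a-line network from the test above.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict = fluid.layers.fc(input=x, size=1, act=None)
    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
    avg_cost = fluid.layers.mean(x=cost)
    fluid.optimizer.SGD(learning_rate=0.001).minimize(avg_cost)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())

    # A hypothetical batch of 20 samples; numpy arrays go directly into
    # `feed`, with no intermediate LoDTensor, as in the rewritten test.
    x_data = np.random.random((20, 13)).astype("float32")
    y_data = np.random.random((20, 1)).astype("float32")
    avg_loss_value, = exe.run(fluid.default_main_program(),
                              feed={'x': x_data, 'y': y_data},
                              fetch_list=[avg_cost])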
python/paddle/v2/fluid/tests/book/test_image_classification_train.py

--- a/python/paddle/v2/fluid/tests/book/test_image_classification_train.py
+++ b/python/paddle/v2/fluid/tests/book/test_image_classification_train.py
 from __future__ import print_function
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-import paddle.v2.fluid.nets as nets
-import paddle.v2.fluid.evaluator as evaluator
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.initializer import XavierInitializer
-from paddle.v2.fluid.optimizer import AdamOptimizer
+import paddle.v2.fluid as fluid


 def resnet_cifar10(input, depth=32):
     def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'):
-        tmp = layers.conv2d(
+        tmp = fluid.layers.conv2d(
             input=input,
             filter_size=filter_size,
             num_filters=ch_out,
...
@@ -20,12 +14,11 @@ def resnet_cifar10(input, depth=32):
             padding=padding,
             act=None,
             bias_attr=False)
-        return layers.batch_norm(input=tmp, act=act)
+        return fluid.layers.batch_norm(input=tmp, act=act)

-    def shortcut(input, ch_in, ch_out, stride, program, init_program):
+    def shortcut(input, ch_in, ch_out, stride):
         if ch_in != ch_out:
-            return conv_bn_layer(input, ch_out, 1, stride, 0, None, program,
-                                 init_program)
+            return conv_bn_layer(input, ch_out, 1, stride, 0, None)
         else:
             return input
...
@@ -33,7 +26,7 @@ def resnet_cifar10(input, depth=32):
         tmp = conv_bn_layer(input, ch_out, 3, stride, 1)
         tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None)
         short = shortcut(input, ch_in, ch_out, stride)
-        return layers.elementwise_add(x=tmp, y=short, act='relu')
+        return fluid.layers.elementwise_add(x=tmp, y=short, act='relu')

     def layer_warp(block_func, input, ch_in, ch_out, count, stride):
         tmp = block_func(input, ch_in, ch_out, stride)
...
@@ -48,14 +41,14 @@ def resnet_cifar10(input, depth=32):
     res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)
     res2 = layer_warp(basicblock, res1, 16, 32, n, 2)
     res3 = layer_warp(basicblock, res2, 32, 64, n, 2)
-    pool = layers.pool2d(
+    pool = fluid.layers.pool2d(
         input=res3, pool_size=8, pool_type='avg', pool_stride=1)
     return pool


 def vgg16_bn_drop(input):
     def conv_block(input, num_filter, groups, dropouts):
-        return nets.img_conv_group(
+        return fluid.nets.img_conv_group(
             input=input,
             pool_size=2,
             pool_stride=2,
...
@@ -72,26 +65,20 @@ def vgg16_bn_drop(input):
     conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0])
     conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0])

-    drop = layers.dropout(x=conv5, dropout_prob=0.5)
-    fc1 = layers.fc(input=drop,
-                    size=512,
-                    act=None,
-                    param_attr={"initializer": XavierInitializer()})
-    reshape1 = layers.reshape(x=fc1, shape=list(fc1.shape + (1, 1)))
-    bn = layers.batch_norm(input=reshape1, act='relu')
-    drop2 = layers.dropout(x=bn, dropout_prob=0.5)
-    fc2 = layers.fc(input=drop2,
-                    size=512,
-                    act=None,
-                    param_attr={"initializer": XavierInitializer()})
+    drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5)
+    fc1 = fluid.layers.fc(input=drop, size=512, act=None)
+    reshape1 = fluid.layers.reshape(x=fc1, shape=list(fc1.shape + (1, 1)))
+    bn = fluid.layers.batch_norm(input=reshape1, act='relu')
+    drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5)
+    fc2 = fluid.layers.fc(input=drop2, size=512, act=None)
     return fc2


 classdim = 10
 data_shape = [3, 32, 32]

-images = layers.data(name='pixel', shape=data_shape, dtype='float32')
-label = layers.data(name='label', shape=[1], dtype='int64')
+images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32')
+label = fluid.layers.data(name='label', shape=[1], dtype='int64')

 # Add neural network config
 # option 1. resnet
...
@@ -99,17 +86,14 @@ label = layers.data(name='label', shape=[1], dtype='int64')
 # option 2. vgg
 net = vgg16_bn_drop(images)

-# print(program)
-
-predict = layers.fc(input=net, size=classdim, act='softmax')
-cost = layers.cross_entropy(input=predict, label=label)
-avg_cost = layers.mean(x=cost)
-# optimizer = SGDOptimizer(learning_rate=0.001)
-optimizer = AdamOptimizer(learning_rate=0.001)
+predict = fluid.layers.fc(input=net, size=classdim, act='softmax')
+cost = fluid.layers.cross_entropy(input=predict, label=label)
+avg_cost = fluid.layers.mean(x=cost)
+optimizer = fluid.optimizer.Adam(learning_rate=0.001)
 opts = optimizer.minimize(avg_cost)

-accuracy = evaluator.Accuracy(input=predict, label=label)
+accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

 BATCH_SIZE = 128
 PASS_NUM = 1
...
@@ -119,13 +103,12 @@ train_reader = paddle.batch(
         paddle.dataset.cifar.train10(), buf_size=128 * 10),
     batch_size=BATCH_SIZE)

-place = core.CPUPlace()
-exe = Executor(place)
+place = fluid.CPUPlace()
+exe = fluid.Executor(place)

-exe.run(framework.default_startup_program())
+exe.run(fluid.default_startup_program())

 for pass_id in range(PASS_NUM):
-    batch_id = 0
     accuracy.reset(exe)
     for data in train_reader():
         img_data = np.array(map(lambda x: x[0].reshape(data_shape),
...
@@ -136,25 +119,13 @@ for pass_id in range(PASS_NUM):
         batch_size = batch_size * i
         y_data = y_data.reshape([batch_size, 1])

-        tensor_img = core.LoDTensor()
-        tensor_y = core.LoDTensor()
-        tensor_img.set(img_data, place)
-        tensor_y.set(y_data, place)
-
-        outs = exe.run(framework.default_main_program(),
-                       feed={"pixel": tensor_img,
-                             "label": tensor_y},
-                       fetch_list=[avg_cost] + accuracy.metrics)
-        loss = np.array(outs[0])
-        acc = np.array(outs[1])
+        loss, acc = exe.run(fluid.default_main_program(),
+                            feed={"pixel": img_data,
+                                  "label": y_data},
+                            fetch_list=[avg_cost] + accuracy.metrics)
         pass_acc = accuracy.eval(exe)
-        print("pass_id:" + str(pass_id) + " batch_id:" + str(batch_id) +
-              " loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(pass_acc))
-        batch_id = batch_id + 1
-
-        if batch_id > 1:
-            # this model is slow, so if we can train two mini batch, we think it works properly.
-            exit(0)
+        print("loss:" + str(loss) + " acc:" + str(acc) + " pass_acc:" + str(
+            pass_acc))
+        # this model is slow, so if we can train two mini batch, we think it works properly.
+        exit(0)
 exit(1)
python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py

--- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
 import numpy as np
 import paddle.v2 as paddle
 import paddle.v2.dataset.conll05 as conll05
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.executor import Executor, g_scope
-from paddle.v2.fluid.optimizer import SGDOptimizer
+import paddle.v2.fluid as fluid

 word_dict, verb_dict, label_dict = conll05.get_dict()
 word_dict_len = len(word_dict)
...
@@ -34,23 +30,23 @@ def load_parameter(file_name, h, w):
 def db_lstm():
     # 8 features
-    word = layers.data(name='word_data', shape=[1], dtype='int64')
-    predicate = layers.data(name='verb_data', shape=[1], dtype='int64')
-    ctx_n2 = layers.data(name='ctx_n2_data', shape=[1], dtype='int64')
-    ctx_n1 = layers.data(name='ctx_n1_data', shape=[1], dtype='int64')
-    ctx_0 = layers.data(name='ctx_0_data', shape=[1], dtype='int64')
-    ctx_p1 = layers.data(name='ctx_p1_data', shape=[1], dtype='int64')
-    ctx_p2 = layers.data(name='ctx_p2_data', shape=[1], dtype='int64')
-    mark = layers.data(name='mark_data', shape=[1], dtype='int64')
+    word = fluid.layers.data(name='word_data', shape=[1], dtype='int64')
+    predicate = fluid.layers.data(name='verb_data', shape=[1], dtype='int64')
+    ctx_n2 = fluid.layers.data(name='ctx_n2_data', shape=[1], dtype='int64')
+    ctx_n1 = fluid.layers.data(name='ctx_n1_data', shape=[1], dtype='int64')
+    ctx_0 = fluid.layers.data(name='ctx_0_data', shape=[1], dtype='int64')
+    ctx_p1 = fluid.layers.data(name='ctx_p1_data', shape=[1], dtype='int64')
+    ctx_p2 = fluid.layers.data(name='ctx_p2_data', shape=[1], dtype='int64')
+    mark = fluid.layers.data(name='mark_data', shape=[1], dtype='int64')

-    predicate_embedding = layers.embedding(
+    predicate_embedding = fluid.layers.embedding(
         input=predicate,
         size=[pred_len, word_dim],
         dtype='float32',
         is_sparse=IS_SPARSE,
         param_attr={'name': 'vemb'})

-    mark_embedding = layers.embedding(
+    mark_embedding = fluid.layers.embedding(
         input=mark,
         size=[mark_dict_len, mark_dim],
         dtype='float32',
...
@@ -58,7 +54,7 @@ def db_lstm():
     word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
     emb_layers = [
-        layers.embedding(
+        fluid.layers.embedding(
             size=[word_dict_len, word_dim],
             input=x,
             param_attr={'name': embedding_name,
...
@@ -68,12 +64,12 @@ def db_lstm():
     emb_layers.append(mark_embedding)

     hidden_0_layers = [
-        layers.fc(input=emb, size=hidden_dim) for emb in emb_layers
+        fluid.layers.fc(input=emb, size=hidden_dim) for emb in emb_layers
     ]

-    hidden_0 = layers.sums(input=hidden_0_layers)
+    hidden_0 = fluid.layers.sums(input=hidden_0_layers)

-    lstm_0 = layers.dynamic_lstm(
+    lstm_0 = fluid.layers.dynamic_lstm(
         input=hidden_0,
         size=hidden_dim,
         candidate_activation='relu',
...
@@ -84,12 +80,12 @@ def db_lstm():
     input_tmp = [hidden_0, lstm_0]

     for i in range(1, depth):
-        mix_hidden = layers.sums(input=[
-            layers.fc(input=input_tmp[0], size=hidden_dim),
-            layers.fc(input=input_tmp[1], size=hidden_dim)
+        mix_hidden = fluid.layers.sums(input=[
+            fluid.layers.fc(input=input_tmp[0], size=hidden_dim),
+            fluid.layers.fc(input=input_tmp[1], size=hidden_dim)
         ])

-        lstm = layers.dynamic_lstm(
+        lstm = fluid.layers.dynamic_lstm(
             input=mix_hidden,
             size=hidden_dim,
             candidate_activation='relu',
...
@@ -99,9 +95,9 @@ def db_lstm():
     input_tmp = [mix_hidden, lstm]

-    feature_out = layers.sums(input=[
-        layers.fc(input=input_tmp[0], size=label_dict_len),
-        layers.fc(input=input_tmp[1], size=label_dict_len)
+    feature_out = fluid.layers.sums(input=[
+        fluid.layers.fc(input=input_tmp[0], size=label_dict_len),
+        fluid.layers.fc(input=input_tmp[1], size=label_dict_len)
     ])

     return feature_out
...
@@ -116,7 +112,7 @@ def to_lodtensor(data, place):
         lod.append(cur_len)
     flattened_data = np.concatenate(data, axis=0).astype("int64")
     flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = core.LoDTensor()
+    res = fluid.LoDTensor()
     res.set(flattened_data, place)
     res.set_lod([lod])
     return res
...
@@ -125,29 +121,29 @@ def to_lodtensor(data, place):
 def main():
     # define network topology
     feature_out = db_lstm()
-    target = layers.data(name='target', shape=[1], dtype='int64')
-    crf_cost = layers.linear_chain_crf(
+    target = fluid.layers.data(name='target', shape=[1], dtype='int64')
+    crf_cost = fluid.layers.linear_chain_crf(
         input=feature_out,
         label=target,
         param_attr={"name": 'crfw',
                     "learning_rate": mix_hidden_lr})
-    avg_cost = layers.mean(x=crf_cost)
+    avg_cost = fluid.layers.mean(x=crf_cost)
     # TODO(qiao)
     # 1. add crf_decode_layer and evaluator
     # 2. use other optimizer and check why out will be NAN
-    sgd_optimizer = SGDOptimizer(learning_rate=0.0001)
-    opts = sgd_optimizer.minimize(avg_cost)
+    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.0001)
+    sgd_optimizer.minimize(avg_cost)

     train_data = paddle.batch(
         paddle.reader.shuffle(
             paddle.dataset.conll05.test(), buf_size=8192),
         batch_size=BATCH_SIZE)
-    place = core.CPUPlace()
-    exe = Executor(place)
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)

-    exe.run(framework.default_startup_program())
+    exe.run(fluid.default_startup_program())

-    embedding_param = g_scope.find_var(embedding_name).get_tensor()
+    embedding_param = fluid.g_scope.find_var(embedding_name).get_tensor()
     embedding_param.set(
         load_parameter(conll05.get_embedding(), word_dict_len, word_dim),
         place)
...
@@ -164,7 +160,7 @@ def main():
             mark_data = to_lodtensor(map(lambda x: x[7], data), place)
             target = to_lodtensor(map(lambda x: x[8], data), place)

-            outs = exe.run(framework.default_main_program(),
+            outs = exe.run(fluid.default_main_program(),
                            feed={
                                'word_data': word_data,
                                'ctx_n2_data': ctx_n2_data,
...
浏览文件 @
0ac8c74e
from
__future__
import
print_function
import
numpy
as
np
import
numpy
as
np
import
paddle.v2
as
paddle
import
paddle.v2
as
paddle
import
paddle.v2.fluid.core
as
core
import
paddle.v2.fluid
as
fluid
import
paddle.v2.fluid.evaluator
as
evaluator
import
paddle.v2.fluid.framework
as
framework
import
paddle.v2.fluid.layers
as
layers
import
paddle.v2.fluid.nets
as
nets
from
paddle.v2.fluid.executor
import
Executor
from
paddle.v2.fluid.optimizer
import
AdamOptimizer
images
=
layers
.
data
(
name
=
'pixel'
,
shape
=
[
1
,
28
,
28
],
dtype
=
'float32'
)
images
=
fluid
.
layers
.
data
(
name
=
'pixel'
,
shape
=
[
1
,
28
,
28
],
dtype
=
'float32'
)
label
=
layers
.
data
(
name
=
'label'
,
shape
=
[
1
],
dtype
=
'int64'
)
label
=
fluid
.
layers
.
data
(
name
=
'label'
,
shape
=
[
1
],
dtype
=
'int64'
)
conv_pool_1
=
nets
.
simple_img_conv_pool
(
conv_pool_1
=
fluid
.
nets
.
simple_img_conv_pool
(
input
=
images
,
input
=
images
,
filter_size
=
5
,
filter_size
=
5
,
num_filters
=
20
,
num_filters
=
20
,
pool_size
=
2
,
pool_size
=
2
,
pool_stride
=
2
,
pool_stride
=
2
,
act
=
"relu"
)
act
=
"relu"
)
conv_pool_2
=
nets
.
simple_img_conv_pool
(
conv_pool_2
=
fluid
.
nets
.
simple_img_conv_pool
(
input
=
conv_pool_1
,
input
=
conv_pool_1
,
filter_size
=
5
,
filter_size
=
5
,
num_filters
=
50
,
num_filters
=
50
,
...
@@ -25,13 +20,13 @@ conv_pool_2 = nets.simple_img_conv_pool(
...
@@ -25,13 +20,13 @@ conv_pool_2 = nets.simple_img_conv_pool(
pool_stride
=
2
,
pool_stride
=
2
,
act
=
"relu"
)
act
=
"relu"
)
predict
=
layers
.
fc
(
input
=
conv_pool_2
,
size
=
10
,
act
=
"softmax"
)
predict
=
fluid
.
layers
.
fc
(
input
=
conv_pool_2
,
size
=
10
,
act
=
"softmax"
)
cost
=
layers
.
cross_entropy
(
input
=
predict
,
label
=
label
)
cost
=
fluid
.
layers
.
cross_entropy
(
input
=
predict
,
label
=
label
)
avg_cost
=
layers
.
mean
(
x
=
cost
)
avg_cost
=
fluid
.
layers
.
mean
(
x
=
cost
)
optimizer
=
AdamOptimizer
(
learning_rate
=
0.01
,
beta1
=
0.9
,
beta2
=
0.999
)
optimizer
=
fluid
.
optimizer
.
Adam
(
learning_rate
=
0.01
)
opt
s
=
opt
imizer
.
minimize
(
avg_cost
)
optimizer
.
minimize
(
avg_cost
)
accuracy
=
evaluator
.
Accuracy
(
input
=
predict
,
label
=
label
)
accuracy
=
fluid
.
evaluator
.
Accuracy
(
input
=
predict
,
label
=
label
)
BATCH_SIZE
=
50
BATCH_SIZE
=
50
PASS_NUM
=
3
PASS_NUM
=
3
...
@@ -40,10 +35,10 @@ train_reader = paddle.batch(
...
@@ -40,10 +35,10 @@ train_reader = paddle.batch(
paddle
.
dataset
.
mnist
.
train
(),
buf_size
=
500
),
paddle
.
dataset
.
mnist
.
train
(),
buf_size
=
500
),
batch_size
=
BATCH_SIZE
)
batch_size
=
BATCH_SIZE
)
place
=
core
.
CPUPlace
()
place
=
fluid
.
CPUPlace
()
exe
=
Executor
(
place
)
exe
=
fluid
.
Executor
(
place
)
exe
.
run
(
f
ramework
.
default_startup_program
())
exe
.
run
(
f
luid
.
default_startup_program
())
for
pass_id
in
range
(
PASS_NUM
):
for
pass_id
in
range
(
PASS_NUM
):
accuracy
.
reset
(
exe
)
accuracy
.
reset
(
exe
)
...
@@ -53,17 +48,10 @@ for pass_id in range(PASS_NUM):
...
@@ -53,17 +48,10 @@ for pass_id in range(PASS_NUM):
y_data
=
np
.
array
(
map
(
lambda
x
:
x
[
1
],
data
)).
astype
(
"int64"
)
y_data
=
np
.
array
(
map
(
lambda
x
:
x
[
1
],
data
)).
astype
(
"int64"
)
y_data
=
y_data
.
reshape
([
BATCH_SIZE
,
1
])
y_data
=
y_data
.
reshape
([
BATCH_SIZE
,
1
])
tensor_img
=
core
.
LoDTensor
()
loss
,
acc
=
exe
.
run
(
fluid
.
default_main_program
(),
tensor_y
=
core
.
LoDTensor
()
feed
=
{
"pixel"
:
img_data
,
tensor_img
.
set
(
img_data
,
place
)
"label"
:
y_data
},
tensor_y
.
set
(
y_data
,
place
)
fetch_list
=
[
avg_cost
]
+
accuracy
.
metrics
)
outs
=
exe
.
run
(
framework
.
default_main_program
(),
feed
=
{
"pixel"
:
tensor_img
,
"label"
:
tensor_y
},
fetch_list
=
[
avg_cost
]
+
accuracy
.
metrics
)
loss
=
np
.
array
(
outs
[
0
])
acc
=
np
.
array
(
outs
[
1
])
pass_acc
=
accuracy
.
eval
(
exe
)
pass_acc
=
accuracy
.
eval
(
exe
)
print
(
"pass_id="
+
str
(
pass_id
)
+
" acc="
+
str
(
acc
)
+
" pass_acc="
+
print
(
"pass_id="
+
str
(
pass_id
)
+
" acc="
+
str
(
acc
)
+
" pass_acc="
+
str
(
pass_acc
))
str
(
pass_acc
))
...
...
python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py

--- a/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
+++ b/python/paddle/v2/fluid/tests/book/test_recognize_digits_mlp.py
 from __future__ import print_function
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-import paddle.v2.fluid.evaluator as evaluator
-from paddle.v2.fluid.io import get_inference_program
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.initializer import UniformInitializer
-from paddle.v2.fluid.optimizer import MomentumOptimizer
-from paddle.v2.fluid.regularizer import L2DecayRegularizer
+import paddle.v2.fluid as fluid

 BATCH_SIZE = 128

-image = layers.data(name='x', shape=[784], dtype='float32')
+image = fluid.layers.data(name='x', shape=[784], dtype='float32')

 param_attr = {
     'name': None,
-    'initializer': UniformInitializer(
-        low=-1.0, high=1.0),
-    'regularization': L2DecayRegularizer(0.0005 * BATCH_SIZE)
+    'regularization': fluid.regularizer.L2Decay(0.0005 * BATCH_SIZE)
 }

-hidden1 = layers.fc(input=image, size=128, act='relu', param_attr=param_attr)
-hidden2 = layers.fc(input=hidden1, size=64, act='relu', param_attr=param_attr)
+hidden1 = fluid.layers.fc(input=image,
+                          size=128,
+                          act='relu',
+                          param_attr=param_attr)
+hidden2 = fluid.layers.fc(input=hidden1,
+                          size=64,
+                          act='relu',
+                          param_attr=param_attr)

-predict = layers.fc(input=hidden2,
-                    size=10,
-                    act='softmax',
-                    param_attr=param_attr)
+predict = fluid.layers.fc(input=hidden2,
+                          size=10,
+                          act='softmax',
+                          param_attr=param_attr)

-label = layers.data(name='y', shape=[1], dtype='int64')
+label = fluid.layers.data(name='y', shape=[1], dtype='int64')

-cost = layers.cross_entropy(input=predict, label=label)
-avg_cost = layers.mean(x=cost)
+cost = fluid.layers.cross_entropy(input=predict, label=label)
+avg_cost = fluid.layers.mean(x=cost)

-optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
+optimizer = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9)
 opts = optimizer.minimize(avg_cost)

-accuracy = evaluator.Accuracy(input=predict, label=label)
+accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

 train_reader = paddle.batch(
     paddle.reader.shuffle(
...
@@ -45,10 +42,10 @@ train_reader = paddle.batch(
 test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=128)

-place = core.CPUPlace()
-exe = Executor(place)
+place = fluid.CPUPlace()
+exe = fluid.Executor(place)

-exe.run(framework.default_startup_program())
+exe.run(fluid.default_startup_program())

 PASS_NUM = 100
 for pass_id in range(PASS_NUM):
...
@@ -58,13 +55,13 @@ for pass_id in range(PASS_NUM):
         y_data = np.array(map(lambda x: x[1], data)).astype("int64")
         y_data = np.expand_dims(y_data, axis=1)

-        tensor_x = core.LoDTensor()
+        tensor_x = fluid.LoDTensor()
         tensor_x.set(x_data, place)

-        tensor_y = core.LoDTensor()
+        tensor_y = fluid.LoDTensor()
         tensor_y.set(y_data, place)

-        outs = exe.run(framework.default_main_program(),
+        outs = exe.run(fluid.default_main_program(),
                        feed={'x': tensor_x,
                              'y': tensor_y},
                        fetch_list=[avg_cost] + accuracy.metrics)
...
@@ -72,10 +69,10 @@ for pass_id in range(PASS_NUM):
         acc = np.array(outs[1])
         pass_acc = accuracy.eval(exe)

-        test_accuracy = evaluator.Accuracy(input=predict, label=label)
+        test_accuracy = fluid.evaluator.Accuracy(input=predict, label=label)
         test_target = [avg_cost] + test_accuracy.metrics + test_accuracy.states
-        inference_program = get_inference_program(test_target)
+        inference_program = fluid.io.get_inference_program(test_target)

         test_accuracy.reset(exe)
         for data in test_reader():
...
@@ -83,18 +80,10 @@ for pass_id in range(PASS_NUM):
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = np.expand_dims(y_data, axis=1)

-            tensor_x = core.LoDTensor()
-            tensor_x.set(x_data, place)
-
-            tensor_y = core.LoDTensor()
-            tensor_y.set(y_data, place)
-
-            outs = exe.run(inference_program,
-                           feed={'x': tensor_x,
-                                 'y': tensor_y},
-                           fetch_list=[avg_cost] + test_accuracy.metrics)
-            out = np.array(outs[0])
-            acc = np.array(outs[1])
+            out, acc = exe.run(inference_program,
+                               feed={'x': x_data,
+                                     'y': y_data},
+                               fetch_list=[avg_cost] + test_accuracy.metrics)

             test_pass_acc = test_accuracy.eval(exe)
             print("pass_id=" + str(pass_id) + " train_cost=" + str(
...
python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py

--- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
+++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment_conv.py
 from __future__ import print_function
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.evaluator as evaluator
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-import paddle.v2.fluid.nets as nets
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.optimizer import AdamOptimizer
+import paddle.v2.fluid as fluid


 def convolution_net(input_dim, class_dim=2, emb_dim=32, hid_dim=32):
-    data = layers.data(name="words", shape=[1], dtype="int64")
-    label = layers.data(name="label", shape=[1], dtype="int64")
-    emb = layers.embedding(input=data, size=[input_dim, emb_dim])
-    conv_3 = nets.sequence_conv_pool(
+    data = fluid.layers.data(name="words", shape=[1], dtype="int64")
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim])
+    conv_3 = fluid.nets.sequence_conv_pool(
         input=emb,
         num_filters=hid_dim,
         filter_size=3,
         act="tanh",
         pool_type="sqrt")
-    conv_4 = nets.sequence_conv_pool(
+    conv_4 = fluid.nets.sequence_conv_pool(
         input=emb,
         num_filters=hid_dim,
         filter_size=4,
         act="tanh",
         pool_type="sqrt")
-    prediction = layers.fc(input=[conv_3, conv_4],
-                           size=class_dim,
-                           act="softmax")
-    cost = layers.cross_entropy(input=prediction, label=label)
-    avg_cost = layers.mean(x=cost)
-    adam_optimizer = AdamOptimizer(learning_rate=0.002)
+    prediction = fluid.layers.fc(input=[conv_3, conv_4],
+                                 size=class_dim,
+                                 act="softmax")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(x=cost)
+    adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
     adam_optimizer.minimize(avg_cost)
-    accuracy = evaluator.Accuracy(input=prediction, label=label)
+    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)
     return avg_cost, accuracy, accuracy.metrics[0]
...
@@ -46,7 +41,7 @@ def to_lodtensor(data, place):
         lod.append(cur_len)
     flattened_data = np.concatenate(data, axis=0).astype("int64")
     flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = core.LoDTensor()
+    res = fluid.LoDTensor()
     res.set(flattened_data, place)
     res.set_lod([lod])
     return res
...
@@ -67,10 +62,10 @@ def main():
         paddle.reader.shuffle(
             paddle.dataset.imdb.train(word_dict), buf_size=1000),
         batch_size=BATCH_SIZE)
-    place = core.CPUPlace()
-    exe = Executor(place)
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)

-    exe.run(framework.default_startup_program())
+    exe.run(fluid.default_startup_program())

     for pass_id in xrange(PASS_NUM):
         accuracy.reset(exe)
...
@@ -80,15 +75,14 @@ def main():
         label = np.array(map(lambda x: x[1], data)).astype("int64")
         label = label.reshape([BATCH_SIZE, 1])

-        tensor_label = core.LoDTensor()
+        tensor_label = fluid.LoDTensor()
         tensor_label.set(label, place)

-        outs = exe.run(framework.default_main_program(),
-                       feed={"words": tensor_words,
-                             "label": tensor_label},
-                       fetch_list=[cost, acc_out])
-        cost_val = np.array(outs[0])
-        acc_val = np.array(outs[1])
+        cost_val, acc_val = exe.run(
+            fluid.default_main_program(),
+            feed={"words": tensor_words,
+                  "label": tensor_label},
+            fetch_list=[cost, acc_out])
         pass_acc = accuracy.eval(exe)
         print("cost=" + str(cost_val) + " acc=" + str(acc_val) +
               " pass_acc=" + str(pass_acc))
...
python/paddle/v2/fluid/tests/book/test_understand_sentiment_dynamic_lstm.py
浏览文件 @
0ac8c74e
import
numpy
as
np
import
numpy
as
np
import
paddle.v2
as
paddle
import
paddle.v2
as
paddle
import
paddle.v2.fluid.core
as
core
import
paddle.v2.fluid
as
fluid
import
paddle.v2.fluid.evaluator
as
evaluator
import
paddle.v2.fluid.framework
as
framework
import
paddle.v2.fluid.layers
as
layers
from
paddle.v2.fluid.executor
import
Executor
from
paddle.v2.fluid.optimizer
import
AdamOptimizer
def
stacked_lstm_net
(
input_dim
,
def
stacked_lstm_net
(
input_dim
,
...
@@ -14,35 +9,35 @@ def stacked_lstm_net(input_dim,
...
@@ -14,35 +9,35 @@ def stacked_lstm_net(input_dim,
                       hid_dim=512,
                       stacked_num=3):
     assert stacked_num % 2 == 1
-    data = layers.data(name="words", shape=[1], dtype="int64")
-    label = layers.data(name="label", shape=[1], dtype="int64")
-    emb = layers.embedding(input=data, size=[input_dim, emb_dim])
+    data = fluid.layers.data(name="words", shape=[1], dtype="int64")
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim])
     # add bias attr
     # TODO(qijun) linear act
-    fc1 = layers.fc(input=emb, size=hid_dim)
-    lstm1, cell1 = layers.dynamic_lstm(input=fc1, size=hid_dim)
+    fc1 = fluid.layers.fc(input=emb, size=hid_dim)
+    lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)
     inputs = [fc1, lstm1]
     for i in range(2, stacked_num + 1):
-        fc = layers.fc(input=inputs, size=hid_dim)
-        lstm, cell = layers.dynamic_lstm(
-            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
+        fc = fluid.layers.fc(input=inputs, size=hid_dim)
+        lstm, cell = fluid.layers.dynamic_lstm(
+            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
         inputs = [fc, lstm]
-    fc_last = layers.sequence_pool(input=inputs[0], pool_type='max')
-    lstm_last = layers.sequence_pool(input=inputs[1], pool_type='max')
-    prediction = layers.fc(input=[fc_last, lstm_last],
-                           size=class_dim,
-                           act='softmax')
-    cost = layers.cross_entropy(input=prediction, label=label)
-    avg_cost = layers.mean(x=cost)
-    adam_optimizer = AdamOptimizer(learning_rate=0.002)
+    fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
+    lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')
+    prediction = fluid.layers.fc(input=[fc_last, lstm_last],
+                                 size=class_dim,
+                                 act='softmax')
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(x=cost)
+    adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
     adam_optimizer.minimize(avg_cost)
-    accuracy = evaluator.Accuracy(input=prediction, label=label)
+    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)
     return avg_cost, accuracy, accuracy.metrics[0]
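The changes above are purely mechanical renames, and the same mapping recurs in every file on this page. A summary inferred from the hunks themselves (not stated anywhere on the page):

import paddle.v2.fluid as fluid

# layers.*                         -> fluid.layers.*
# evaluator.Accuracy               -> fluid.evaluator.Accuracy
# AdamOptimizer / SGDOptimizer     -> fluid.optimizer.Adam / fluid.optimizer.SGD
# core.CPUPlace / core.LoDTensor   -> fluid.CPUPlace / fluid.LoDTensor
# Executor                         -> fluid.Executor
# framework.default_*_program      -> fluid.default_*_program
adam = fluid.optimizer.Adam(learning_rate=0.002)  # e.g. the optimizer above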
@@ -55,7 +50,7 @@ def to_lodtensor(data, place):
         lod.append(cur_len)
     flattened_data = np.concatenate(data, axis=0).astype("int64")
     flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = core.LoDTensor()
+    res = fluid.LoDTensor()
     res.set(flattened_data, place)
     res.set_lod([lod])
     return res
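Only the `core.LoDTensor()` line of `to_lodtensor` changes, and the diff collapses the rest of the helper. For readability, a reconstruction of the whole function in the new style; the lines not visible in the hunk (the sequence-length loop) are assumed from the visible context and may differ slightly from the source:

import numpy as np
import paddle.v2.fluid as fluid

def to_lodtensor(data, place):
    # data: list of variable-length int64 sequences; build cumulative
    # level-of-detail offsets [0, len0, len0+len1, ...].
    seq_lens = [len(seq) for seq in data]
    cur_len = 0
    lod = [cur_len]
    for seq_len in seq_lens:
        cur_len += seq_len
        lod.append(cur_len)
    # Flatten all sequences into one (total_len, 1) int64 column.
    flattened_data = np.concatenate(data, axis=0).astype("int64")
    flattened_data = flattened_data.reshape([len(flattened_data), 1])
    res = fluid.LoDTensor()
    res.set(flattened_data, place)
    res.set_lod([lod])
    return res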
@@ -77,10 +72,10 @@ def main():
         paddle.reader.shuffle(
             paddle.dataset.imdb.train(word_dict), buf_size=1000),
         batch_size=BATCH_SIZE)
-    place = core.CPUPlace()
-    exe = Executor(place)
-    exe.run(framework.default_startup_program())
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
     for pass_id in xrange(PASS_NUM):
         accuracy.reset(exe)
@@ -90,15 +85,14 @@ def main():
             label = np.array(map(lambda x: x[1], data)).astype("int64")
             label = label.reshape([BATCH_SIZE, 1])
-            tensor_label = core.LoDTensor()
+            tensor_label = fluid.LoDTensor()
             tensor_label.set(label, place)
-            outs = exe.run(framework.default_main_program(),
-                           feed={"words": tensor_words,
-                                 "label": tensor_label},
-                           fetch_list=[cost, acc_out])
-            cost_val = np.array(outs[0])
-            acc_val = np.array(outs[1])
+            cost_val, acc_val = exe.run(fluid.default_main_program(),
+                                        feed={"words": tensor_words,
+                                              "label": tensor_label},
+                                        fetch_list=[cost, acc_out])
             pass_acc = accuracy.eval(exe)
             print("cost=" + str(cost_val) + " acc=" + str(acc_val) +
                   " pass_acc=" + str(pass_acc))
python/paddle/v2/fluid/tests/book/test_understand_sentiment_lstm.py
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.optimizer import AdamOptimizer
+import paddle.v2.fluid as fluid
 def lstm_net(dict_dim, class_dim=2, emb_dim=32, seq_len=80, batch_size=50):
-    data = layers.data(
+    data = fluid.layers.data(
         name="words",
         shape=[seq_len * batch_size, 1],
         append_batch_size=False,
         dtype="int64")
-    label = layers.data(
+    label = fluid.layers.data(
         name="label",
         shape=[batch_size, 1],
         append_batch_size=False,
         dtype="int64")
-    emb = layers.embedding(input=data, size=[dict_dim, emb_dim])
-    emb = layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
-    emb = layers.transpose(x=emb, axis=[1, 0, 2])
-    c_pre_init = layers.fill_constant(
+    emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim])
+    emb = fluid.layers.reshape(x=emb, shape=[batch_size, seq_len, emb_dim])
+    emb = fluid.layers.transpose(x=emb, axis=[1, 0, 2])
+    c_pre_init = fluid.layers.fill_constant(
         dtype=emb.dtype, shape=[batch_size, emb_dim], value=0.0)
-    layer_1_out = layers.lstm(emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)
-    layer_1_out = layers.transpose(x=layer_1_out, axis=[1, 0, 2])
-    prediction = layers.fc(input=layer_1_out, size=class_dim, act="softmax")
-    cost = layers.cross_entropy(input=prediction, label=label)
-    avg_cost = layers.mean(x=cost)
-    adam_optimizer = AdamOptimizer(learning_rate=0.002)
-    opts = adam_optimizer.minimize(avg_cost)
-    acc = layers.accuracy(input=prediction, label=label)
+    layer_1_out = fluid.layers.lstm(
+        emb, c_pre_init=c_pre_init, hidden_dim=emb_dim)
+    layer_1_out = fluid.layers.transpose(x=layer_1_out, axis=[1, 0, 2])
+    prediction = fluid.layers.fc(input=layer_1_out,
+                                 size=class_dim, act="softmax")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(x=cost)
+    adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
+    adam_optimizer.minimize(avg_cost)
+    acc = fluid.layers.accuracy(input=prediction, label=label)
     return avg_cost, acc
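Note the small cleanup alongside the renames here: the unused `opts = adam_optimizer.minimize(avg_cost)` binding becomes a bare call. `minimize` is used for its side effect of appending the backward pass and the update ops to the default main program, so (assuming `avg_cost` from `lstm_net` above; this reading is inferred from the change, not stated on the page) nothing needs to be kept:

adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
# Appends backward + Adam update ops to fluid.default_main_program();
# the return value is not needed by these tests.
adam_optimizer.minimize(avg_cost)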
@@ -48,7 +47,7 @@ def to_lodtensor(data, place):
         lod.append(cur_len)
     flattened_data = np.concatenate(data, axis=0).astype("int64")
     flattened_data = flattened_data.reshape([len(flattened_data), 1])
-    res = core.LoDTensor()
+    res = fluid.LoDTensor()
     res.set(flattened_data, place)
     res.set_lod([lod])
     return res
@@ -65,7 +64,7 @@ def prepare_feed_data(data, place):
     label = np.array(map(lambda x: x[1], data)).astype("int64")
     label = label.reshape([len(label), 1])
-    tensor_label = core.LoDTensor()
+    tensor_label = fluid.LoDTensor()
     tensor_label.set(label, place)
     return tensor_words, tensor_label
@@ -86,17 +85,17 @@ def main():
         paddle.reader.shuffle(
             paddle.dataset.imdb.train(word_dict), buf_size=BATCH_SIZE * 10),
         batch_size=BATCH_SIZE)
-    place = core.CPUPlace()
-    exe = Executor(place)
-    exe.run(framework.default_startup_program())
+    place = fluid.CPUPlace()
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
     for pass_id in xrange(PASS_NUM):
         for data in train_data():
             chopped_data = chop_data(data)
             tensor_words, tensor_label = prepare_feed_data(chopped_data, place)
-            outs = exe.run(framework.default_main_program(),
+            outs = exe.run(fluid.default_main_program(),
                            feed={"words": tensor_words,
                                  "label": tensor_label},
                            fetch_list=[cost, acc])
python/paddle/v2/fluid/tests/book/test_word2vec.py
 import numpy as np
 import paddle.v2 as paddle
-import paddle.v2.fluid.core as core
-import paddle.v2.fluid.framework as framework
-import paddle.v2.fluid.layers as layers
-from paddle.v2.fluid.executor import Executor
-from paddle.v2.fluid.optimizer import SGDOptimizer
+import paddle.v2.fluid as fluid
 PASS_NUM = 100
 EMBED_SIZE = 32
@@ -16,57 +12,57 @@ IS_SPARSE = True
 word_dict = paddle.dataset.imikolov.build_dict()
 dict_size = len(word_dict)
-first_word = layers.data(name='firstw', shape=[1], dtype='int64')
-second_word = layers.data(name='secondw', shape=[1], dtype='int64')
-third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
-forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
-next_word = layers.data(name='nextw', shape=[1], dtype='int64')
+first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64')
+second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64')
+third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64')
+forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64')
+next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64')
-embed_first = layers.embedding(
+embed_first = fluid.layers.embedding(
     input=first_word,
     size=[dict_size, EMBED_SIZE],
     dtype='float32',
     is_sparse=IS_SPARSE,
     param_attr={'name': 'shared_w'})
-embed_second = layers.embedding(
+embed_second = fluid.layers.embedding(
     input=second_word,
     size=[dict_size, EMBED_SIZE],
    dtype='float32',
     is_sparse=IS_SPARSE,
     param_attr={'name': 'shared_w'})
-embed_third = layers.embedding(
+embed_third = fluid.layers.embedding(
     input=third_word,
     size=[dict_size, EMBED_SIZE],
     dtype='float32',
     is_sparse=IS_SPARSE,
     param_attr={'name': 'shared_w'})
-embed_forth = layers.embedding(
+embed_forth = fluid.layers.embedding(
     input=forth_word,
     size=[dict_size, EMBED_SIZE],
     dtype='float32',
     is_sparse=IS_SPARSE,
     param_attr={'name': 'shared_w'})
-concat_embed = layers.concat(
+concat_embed = fluid.layers.concat(
     input=[embed_first, embed_second, embed_third, embed_forth], axis=1)
-hidden1 = layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid')
-predict_word = layers.fc(input=hidden1, size=dict_size, act='softmax')
-cost = layers.cross_entropy(input=predict_word, label=next_word)
-avg_cost = layers.mean(x=cost)
-sgd_optimizer = SGDOptimizer(learning_rate=0.001)
-opts = sgd_optimizer.minimize(avg_cost)
+hidden1 = fluid.layers.fc(input=concat_embed, size=HIDDEN_SIZE, act='sigmoid')
+predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax')
+cost = fluid.layers.cross_entropy(input=predict_word, label=next_word)
+avg_cost = fluid.layers.mean(x=cost)
+sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+sgd_optimizer.minimize(avg_cost)
 train_reader = paddle.batch(
     paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE)
-place = core.CPUPlace()
-exe = Executor(place)
+place = fluid.CPUPlace()
+exe = fluid.Executor(place)
 # fix https://github.com/PaddlePaddle/Paddle/issues/5434 then remove
 # below exit line.
 exit(0)
-exe.run(framework.default_startup_program())
+exe.run(fluid.default_startup_program())
 for pass_id in range(PASS_NUM):
     for data in train_reader():
@@ -74,36 +70,15 @@ for pass_id in range(PASS_NUM):
         input_data = map(lambda x: np.array(x).astype("int64"), input_data)
         input_data = map(lambda x: np.expand_dims(x, axis=1), input_data)
-        first_data = input_data[0]
-        first_tensor = core.LoDTensor()
-        first_tensor.set(first_data, place)
-        second_data = input_data[1]
-        second_tensor = core.LoDTensor()
-        second_tensor.set(second_data, place)
-        third_data = input_data[2]
-        third_tensor = core.LoDTensor()
-        third_tensor.set(third_data, place)
-        forth_data = input_data[3]
-        forth_tensor = core.LoDTensor()
-        forth_tensor.set(forth_data, place)
-        next_data = input_data[4]
-        next_tensor = core.LoDTensor()
-        next_tensor.set(next_data, place)
-        outs = exe.run(framework.default_main_program(),
-                       feed={
-                           'firstw': first_tensor,
-                           'secondw': second_tensor,
-                           'thirdw': third_tensor,
-                           'forthw': forth_tensor,
-                           'nextw': next_tensor
-                       },
-                       fetch_list=[avg_cost])
-        out = np.array(outs[0])
-        if out[0] < 10.0:
+        avg_cost_np = exe.run(fluid.default_main_program(),
+                              feed={
+                                  'firstw': input_data[0],
+                                  'secondw': input_data[1],
+                                  'thirdw': input_data[2],
+                                  'forthw': input_data[3],
+                                  'nextw': input_data[4]
+                              },
+                              fetch_list=[avg_cost])
+        if avg_cost_np[0] < 10.0:
             exit(0)  # if avg cost less than 10.0, we think our code is good.
 exit(1)
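The word2vec hunk also shows the second simplification: dense numpy batches are fed directly under the data-layer names, with no per-input `LoDTensor` staging. A self-contained toy sketch of the unified style (all names illustrative; assumes a `paddle.v2.fluid` install of this vintage):

import numpy as np
import paddle.v2.fluid as fluid

# Tiny graph: a linear layer and the batch mean of its output.
x = fluid.layers.data(name='x', shape=[1], dtype='float32')
y = fluid.layers.fc(input=x, size=1)
avg_y = fluid.layers.mean(x=y)

place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())

# Feed numpy directly under the data-layer name; the fetch comes back
# as a numpy array that can be unpacked in one step.
x_np = np.random.random((4, 1)).astype('float32')
avg_y_np, = exe.run(fluid.default_main_program(),
                    feed={'x': x_np},
                    fetch_list=[avg_y])
print(avg_y_np)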