BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)

Commit 62c51e44 (unverified)
Authored May 10, 2018 by dzhwinter; committed via GitHub on May 10, 2018.
Parent: 01a2773d

"add float64 tests" (#10450)

* "add float64 tests"
* "fix based comment"
* "fixed based comment"
Showing 3 changed files with 91 additions and 8 deletions (+91 −8):

- python/paddle/fluid/executor.py (+6 −2)
- python/paddle/fluid/optimizer.py (+11 −6)
- python/paddle/fluid/tests/unittests/test_network_with_dtype.py (+74 −0)
python/paddle/fluid/executor.py

```diff
@@ -299,14 +299,18 @@ class Executor(object):
         if feed is None:
             feed = {}
         if not isinstance(feed, dict):
-            raise TypeError("feed should be a map")
+            raise TypeError(
+                "feed requires dict as its Parameter. But you passed in %s" %
+                (type(feed)))
         if fetch_list is None:
             fetch_list = []
         if program is None:
             program = default_main_program()
         if not isinstance(program, Program):
-            raise TypeError()
+            raise TypeError(
+                "Executor requires Program as its Parameter. But you passed in %s"
+                % (type(program)))
         if scope is None:
             scope = global_scope()
```
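The effect of the new messages is easy to demonstrate by passing the wrong type on purpose. A minimal sketch, not part of the commit, assuming the fluid API of this era:

```python
import paddle.fluid as fluid

# an Executor on CPU; the type checks fire before any program executes
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
try:
    # feed must be a dict; a list now triggers the descriptive message
    exe.run(fluid.default_main_program(), feed=[])
except TypeError as e:
    # e.g. "feed requires dict as its Parameter. But you passed in <class 'list'>"
    print(e)
```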
python/paddle/fluid/optimizer.py

```diff
@@ -47,6 +47,8 @@ class Optimizer(object):
             raise TypeError("learning rate should be float or Variable")
         self.regularization = regularization
         self._learning_rate = learning_rate
+        # the learning rate type should be inferenced from loss
+        self._dtype = None
         # each program should have a independent learning rate
         # program -> Variable(learning_rate)
         self._learning_rate_map = dict()
@@ -77,7 +79,7 @@ class Optimizer(object):
             name=unique_name.generate("learning_rate"),
             shape=[1],
             value=float(self._learning_rate),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             persistable=True)

     def global_learning_rate(self, program=None):
@@ -200,6 +202,7 @@ class Optimizer(object):
         # Create any accumulators
         program = loss.block.program
+        self._dtype = loss.dtype
         with program_guard(program, startup_program):
             global_block = framework.default_main_program().global_block()
             start = len(global_block.ops)
@@ -391,7 +394,7 @@ class AdamOptimizer(Optimizer):
         beta_shape = [1]
         self._beta1_pow_acc = self.helper.create_global_variable(
             name=unique_name.generate('beta1_pow_acc'),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             shape=beta_shape,
             lod_level=0,
             persistable=True)
@@ -400,7 +403,7 @@ class AdamOptimizer(Optimizer):
         self._beta2_pow_acc = self.helper.create_global_variable(
             name=unique_name.generate('beta2_pow_acc'),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             shape=beta_shape,
             lod_level=0,
             persistable=True)
@@ -493,7 +496,7 @@ class AdamaxOptimizer(Optimizer):
         beta_shape = [1]
         self._beta1_pow_acc = self.helper.create_global_variable(
             name=unique_name.generate('beta1_pow_acc'),
-            dtype='float32',
+            dtype='float32' if self._dtype == None else self._dtype,
             shape=beta_shape,
             lod_level=0,
             persistable=True)
@@ -900,8 +903,10 @@ class ModelAverage(Optimizer):
         # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)
         tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
         sum = layers.sum(x=[sum_1, sum_2, sum_3])
-        tmp = layers.cast(x=tmp, dtype='float32')
-        sum = layers.cast(x=sum, dtype='float32')
+        tmp = layers.cast(
+            x=tmp, dtype='float32' if self._dtype == None else self._dtype)
+        sum = layers.cast(
+            x=sum, dtype='float32' if self._dtype == None else self._dtype)
         layers.elementwise_div(x=sum, y=tmp, out=param)

     def _add_average_restore_op(self, block, param_grad):
```
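The pattern is identical at every touched site: `_dtype` starts out `None`, the accumulator-creation path records `loss.dtype`, and each later variable creation falls back to `'float32'` only when no loss has been seen yet. A standalone sketch of that flow, with illustrative names rather than Paddle's:

```python
class DtypeInferringOptimizer(object):
    """Minimal sketch of the dtype-inference pattern in this commit."""

    def __init__(self):
        self._dtype = None  # unknown until a loss is seen

    def minimize(self, loss):
        # infer the network's working dtype from the loss variable
        self._dtype = loss.dtype

    def _var_dtype(self):
        # dtype used for learning-rate and accumulator variables
        return 'float32' if self._dtype is None else self._dtype


class FakeLoss(object):
    dtype = 'float64'


opt = DtypeInferringOptimizer()
assert opt._var_dtype() == 'float32'   # default before minimize()
opt.minimize(FakeLoss())
assert opt._var_dtype() == 'float64'   # inferred from the loss
```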
python/paddle/fluid/tests/unittests/test_network_with_dtype.py (new file, mode 100644)

```python
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.executor import Executor

BATCH_SIZE = 20


class TestNetWithDtype(unittest.TestCase):
    def setUp(self):
        self.dtype = "float64"
        self.init_dtype()
        self.x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
        self.y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)

        y_predict = fluid.layers.fc(input=self.x, size=1, act=None)

        cost = fluid.layers.square_error_cost(input=y_predict, label=self.y)
        avg_cost = fluid.layers.mean(cost)
        self.fetch_list = [avg_cost]

        sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
        sgd_optimizer.minimize(avg_cost)

    def run_net_on_place(self, place):
        train_reader = paddle.batch(
            paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
        feeder = fluid.DataFeeder(place=place, feed_list=[self.x, self.y])
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        for data in train_reader():
            exe.run(fluid.default_main_program(),
                    feed=feeder.feed(data),
                    fetch_list=self.fetch_list)
            # the main program is runable, the datatype is fully supported
            break

    def init_dtype(self):
        pass

    def test_cpu(self):
        place = fluid.CPUPlace()
        self.run_net_on_place(place)

    def test_gpu(self):
        if not core.is_compiled_with_cuda():
            return
        place = fluid.CUDAPlace(0)
        self.run_net_on_place(place)


# TODO(dzhwinter): make sure the fp16 is runable
# class TestFloat16(SimpleNet):
#     def init_dtype(self):
#         self.dtype = "float16"

if __name__ == '__main__':
    unittest.main()
```
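The `init_dtype` hook lets a subclass re-run the same network under a different dtype, as the commented-out float16 TODO above suggests. A hypothetical subclass, not part of the commit, would look like:

```python
class TestFloat32(TestNetWithDtype):
    def init_dtype(self):
        # override the float64 default assigned in setUp
        self.dtype = "float32"
```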