Unverified commit 5c64d84f
Authored by cyber-pioneer on Dec 09, 2022; committed via GitHub on Dec 09, 2022.
move fluid.layers.create_global_var to static.create_global_var (#48777)
Parent: 0f6c5459

Showing 38 changed files with 179 additions and 184 deletions.
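The change is mechanical: every call site swaps the deprecated fluid entry point for the paddle.static one, with the implementation relocated to python/paddle/tensor/creation.py and re-exported from python/paddle/static/__init__.py. A minimal before/after sketch (the variable name is illustrative, not taken from the diff):

import paddle

paddle.enable_static()

# Before this commit:
#   var = paddle.fluid.layers.create_global_var(shape=[1], value=0.0, dtype='float32')
# After this commit:
var = paddle.static.create_global_var(
    shape=[1], value=0.0, dtype='float32', persistable=True
)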
Changed files:

python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py (+5 −5)
python/paddle/distributed/fleet/metrics/metric.py (+7 −7)
python/paddle/distributed/passes/auto_parallel_gradient_merge.py (+3 −3)
python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py (+2 −1)
python/paddle/fluid/contrib/mixed_precision/decorator.py (+4 −4)
python/paddle/fluid/contrib/optimizer.py (+1 −1)
python/paddle/fluid/dygraph/learning_rate_scheduler.py (+1 −1)
python/paddle/fluid/layers/control_flow.py (+2 −1)
python/paddle/fluid/layers/learning_rate_scheduler.py (+2 −2)
python/paddle/fluid/layers/tensor.py (+0 −87)
python/paddle/fluid/optimizer.py (+18 −17)
python/paddle/fluid/tests/test_python_operator_overriding.py (+2 −3)
python/paddle/fluid/tests/unittests/dist_transformer.py (+1 −1)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py (+1 −1)
python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py (+3 −3)
python/paddle/fluid/tests/unittests/test_adam_op.py (+6 −6)
python/paddle/fluid/tests/unittests/test_adamw_op.py (+2 −2)
python/paddle/fluid/tests/unittests/test_create_global_var.py (+4 −4)
python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py (+1 −1)
python/paddle/fluid/tests/unittests/test_fleet_metric.py (+2 −2)
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py (+1 −1)
python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py (+1 −1)
python/paddle/fluid/tests/unittests/test_lr_scheduler.py (+1 −1)
python/paddle/fluid/tests/unittests/test_parallel_executor_feed_persistable_var.py (+1 −1)
python/paddle/fluid/tests/unittests/test_queue.py (+3 −3)
python/paddle/fluid/tests/unittests/test_switch.py (+2 −2)
python/paddle/fluid/tests/unittests/test_var_info.py (+2 −2)
python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py (+2 −2)
python/paddle/incubate/optimizer/distributed_fused_lamb.py (+2 −2)
python/paddle/incubate/optimizer/lookahead.py (+3 −3)
python/paddle/optimizer/adam.py (+2 −2)
python/paddle/optimizer/adamw.py (+2 −2)
python/paddle/optimizer/lamb.py (+3 −2)
python/paddle/optimizer/momentum.py (+2 −2)
python/paddle/optimizer/optimizer.py (+2 −2)
python/paddle/optimizer/sgd.py (+3 −2)
python/paddle/static/__init__.py (+1 −1)
python/paddle/tensor/creation.py (+79 −1)
python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py

@@ -23,9 +23,9 @@ from paddle import framework
 from paddle.common_ops_import import LayerHelper
 from paddle.fluid.clip import GradientClipByNorm, append_gradient_clip_ops
 from paddle.fluid.dygraph import base as imperative_base
-from paddle.fluid.layers import tensor
 from paddle.fluid.optimizer import Momentum, Optimizer
 from paddle.framework import core
+from paddle.static import create_global_var


 class DGCMomentumOptimizer(Optimizer):

@@ -217,7 +217,7 @@ class DGCMomentumOptimizer(Optimizer):
         )

         # rampup begin step var for all_reduce_op_handle
-        self._rampup_begin_step_var = tensor.create_global_var(
+        self._rampup_begin_step_var = create_global_var(
             shape=[1],
             dtype=core.VarDesc.VarType.FP32,
             persistable=True,

@@ -237,7 +237,7 @@ class DGCMomentumOptimizer(Optimizer):
         v_var = self._add_accumulator(self._v_velocity_acc_str, param_var)

-        k_var = tensor.create_global_var(
+        k_var = create_global_var(
             shape=[1],
             dtype=param_var.dtype,
             persistable=True,

@@ -246,7 +246,7 @@ class DGCMomentumOptimizer(Optimizer):
             force_cpu=True,
         )

-        encoded_var = tensor.create_global_var(
+        encoded_var = create_global_var(
             shape=[1],
             dtype=param_var.dtype,
             persistable=True,

@@ -255,7 +255,7 @@ class DGCMomentumOptimizer(Optimizer):
             force_cpu=False,
         )

-        gather_var = tensor.create_global_var(
+        gather_var = create_global_var(
             shape=[1],
             dtype=param_var.dtype,
             persistable=True,
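Most files in this patch spell the new call out as paddle.static.create_global_var(...); dgc_optimizer.py instead imports the symbol directly, as shown above. Both names should resolve to the same function on a build that includes this commit, which this small sketch checks:

import paddle
from paddle.static import create_global_var

# Direct import and fully qualified access name the same function object.
assert create_global_var is paddle.static.create_global_var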
python/paddle/distributed/fleet/metrics/metric.py

@@ -40,7 +40,7 @@ def sum(input, scope=None, util=None):
             # in model.py
             input = fluid.layers.cast(some_input, dtype='float32')
             cnt = paddle.sum(input)
-            global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
+            global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
             tmp = fluid.layers.elementwise_add(cnt, global_cnt)
             fluid.layers.assign(tmp, global_cnt)

@@ -80,7 +80,7 @@ def max(input, scope=None, util=None):
             # in model.py
             input = fluid.layers.cast(some_input, dtype='float32')
             cnt = paddle.sum(input)
-            global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
+            global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
             tmp = paddle.maximum(cnt, global_cnt)
             fluid.layers.assign(tmp, global_cnt)

@@ -120,7 +120,7 @@ def min(input, scope=None, util=None):
             # in model.py
             input = fluid.layers.cast(some_input, dtype='float32')
             cnt = paddle.sum(input)
-            global_cnt = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
+            global_cnt = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
             tmp = fluid.layers.elementwise_min(cnt, global_cnt)
             fluid.layers.assign(tmp, global_cnt)

@@ -391,15 +391,15 @@ def acc(correct, total, scope=None, util=None):
         .. code-block:: python

             # in model.py
-            correct = fluid.layers.create_global_var(dtype='float32', shape=[1], value=0)
-            total = fluid.layers.create_global_var(dtype='float32', shape=[1], value=0)
+            correct = paddle.static.create_global_var(dtype='float32', shape=[1], value=0)
+            total = paddle.static.create_global_var(dtype='float32', shape=[1], value=0)
             acc = fluid.layers.acc(predict, label, k=1, correct=correct, total=total)

-            global_correct = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
+            global_correct = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
             tmp1 = fluid.layers.elementwise_min(correct, global_correct)
             fluid.layers.assign(tmp1, global_correct)
-            global_total = fluid.layers.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
+            global_total = paddle.static.create_global_var(persistable=True, dtype='float32', shape=[1], value=0)
             tmp2 = fluid.layers.elementwise_min(total, global_total)
             fluid.layers.assign(tmp2, global_total)
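The docstring examples above share one pattern: accumulate a per-batch statistic into a persistable global variable with an elementwise op plus assign. A standalone sketch of that accumulator pattern under static graph mode (names and values are illustrative, and it uses current paddle ops rather than the fluid.layers ones kept in the docstrings):

import paddle

paddle.enable_static()
main = paddle.static.Program()
startup = paddle.static.Program()
with paddle.static.program_guard(main, startup):
    batch_cnt = paddle.full(shape=[1], fill_value=3.0, dtype='float32')
    # persistable=True keeps the value alive across executor runs
    global_cnt = paddle.static.create_global_var(
        persistable=True, dtype='float32', shape=[1], value=0.0
    )
    paddle.assign(paddle.add(batch_cnt, global_cnt), global_cnt)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
for _ in range(4):
    (out,) = exe.run(main, fetch_list=[global_cnt])
print(out)  # [12.] -- 3.0 is added on each of the four runs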
python/paddle/distributed/passes/auto_parallel_gradient_merge.py

@@ -64,7 +64,7 @@ def _remove_and_get_optimizer_op(main_program, dist_context):
 def _get_gm_cond_var(main_program, k_steps, dist_context):
     main_block = main_program.global_block()
     # Add const var
-    k_step_var = layers.create_global_var(
+    k_step_var = paddle.static.create_global_var(
         name="gradient_merge_k",
         shape=[1],
         value=int(k_steps),

@@ -74,7 +74,7 @@ def _get_gm_cond_var(main_program, k_steps, dist_context):
     )
     set_var_dist_attr(dist_context, k_step_var, [-1], world_process_group.ranks)

-    zero_var = layers.create_global_var(
+    zero_var = paddle.static.create_global_var(
         name="gradient_merge_zero",
         shape=[1],
         value=int(0),

@@ -85,7 +85,7 @@ def _get_gm_cond_var(main_program, k_steps, dist_context):
     set_var_dist_attr(dist_context, zero_var, [-1], world_process_group.ranks)

     # Add step var & cond var
-    step_var = layers.create_global_var(
+    step_var = paddle.static.create_global_var(
         name="gradient_merge_step",
         shape=[1],
         value=int(0),
python/paddle/fluid/contrib/mixed_precision/bf16/decorator.py

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import paddle
 from paddle.fluid import (
     core,
     default_main_program,

@@ -68,7 +69,7 @@ class OptimizerWithMixedPrecision:
         if isinstance(self._optimizer._learning_rate, float):
             self._optimizer._learning_rate_map[
                 default_main_program()
-            ] = layers.create_global_var(
+            ] = paddle.static.create_global_var(
                 name=unique_name.generate("learning_rate"),
                 shape=[1],
                 value=float(self._optimizer._learning_rate),
python/paddle/fluid/contrib/mixed_precision/decorator.py

@@ -122,7 +122,7 @@ class OptimizerWithMixedPrecision:
         return getattr(self._optimizer, "_supports_check_nan_inf", False)

     def _init_amp_var(self):
-        self._loss_scaling = layers.create_global_var(
+        self._loss_scaling = paddle.static.create_global_var(
             name=unique_name.generate("loss_scaling"),
             shape=[1],
             value=self._init_loss_scaling,

@@ -131,14 +131,14 @@ class OptimizerWithMixedPrecision:
         )

         if self._use_dynamic_loss_scaling:
-            self._num_good_steps = layers.create_global_var(
+            self._num_good_steps = paddle.static.create_global_var(
                 name=unique_name.generate("num_good_steps"),
                 shape=[1],
                 value=0,
                 dtype='int32',
                 persistable=True,
             )
-            self._num_bad_steps = layers.create_global_var(
+            self._num_bad_steps = paddle.static.create_global_var(
                 name=unique_name.generate("num_bad_steps"),
                 shape=[1],
                 value=0,

@@ -151,7 +151,7 @@ class OptimizerWithMixedPrecision:
         if isinstance(self._optimizer._learning_rate, float):
             self._optimizer._learning_rate_map[
                 default_main_program()
-            ] = layers.create_global_var(
+            ] = paddle.static.create_global_var(
                 name=unique_name.generate("learning_rate"),
                 shape=[1],
                 value=float(self._optimizer._learning_rate),
python/paddle/fluid/contrib/optimizer.py

@@ -143,7 +143,7 @@ class Momentum(Optimizer):
             var_name = param.name + "_fp32_master"
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,
python/paddle/fluid/dygraph/learning_rate_scheduler.py

@@ -68,7 +68,7 @@ class LearningRateDecay:
         """
         from .. import layers

-        lr = layers.create_global_var(
+        lr = paddle.static.create_global_var(
            name=unique_name.generate("learning_rate"),
            shape=[1],
            value=float(lr),
python/paddle/fluid/layers/control_flow.py

@@ -2368,9 +2368,10 @@ class Switch:
     Examples:
         .. code-block:: python

+            import paddle
             import paddle.fluid as fluid

-            lr = fluid.layers.create_global_var(
+            lr = paddle.static.create_global_var(
                 shape=[1],
                 value=0.0,
                 dtype='float32',
python/paddle/fluid/layers/learning_rate_scheduler.py

@@ -420,7 +420,7 @@ def piecewise_decay(boundaries, values):
     else:
         global_step = _decay_step_counter()

-        lr = tensor.create_global_var(
+        lr = paddle.static.create_global_var(
             shape=[1],
             value=0.0,
             dtype='float32',

@@ -575,7 +575,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
         )
         return lr
     else:
-        lr = tensor.create_global_var(
+        lr = paddle.static.create_global_var(
             shape=[1],
             value=0.0,
             dtype=dtype,
python/paddle/fluid/layers/tensor.py

@@ -12,25 +12,19 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import math
 import numpy
 import warnings

 from ..layer_helper import LayerHelper
-from ..param_attr import ParamAttr
-from ..initializer import Initializer
 from ..framework import (
     _current_expected_place,
     convert_np_dtype_to_dtype_,
     _non_static_mode,
     _varbase_creator,
-    device_guard,
     _in_legacy_dygraph,
     in_dygraph_mode,
-    _get_paddle_place,
 )
 from ..framework import Variable
-from ..initializer import Constant
 from ..core import VarDesc
 from .. import core
 from .layer_function_generator import templatedoc

@@ -47,7 +41,6 @@ from .utils import check_shape
 from paddle import _C_ops, _legacy_C_ops

 __all__ = [
-    'create_global_var',
     'cast',
     'tensor_array_to_tensor',
     'concat',

@@ -61,86 +54,6 @@ __all__ = [
 ]


-def create_global_var(
-    shape, value, dtype, persistable=False, force_cpu=False, name=None
-):
-    """
-    This function creates a new tensor variable with value in the global block(block 0).
-
-    Parameters:
-        shape (list[int]|tuple[int]): Shape of the variable
-        value (float): The value of the variable. The new created
-            variable will be filled with it.
-        dtype (str): Data type of the variable
-        persistable (bool, optional): If this variable is persistable.
-            Default: False
-        force_cpu (bool, optional): Force this variable to be on CPU.
-            Default: False
-        name (str, optional): For detailed information, please refer to
-            :ref:`api_guide_Name` . Usually name is no need to set and None by default.
-
-    Returns:
-        Variable: The created Variable
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            paddle.enable_static()
-            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
-                                                  persistable=True, force_cpu=True, name='new_var')
-    """
-    check_type(
-        shape, 'shape', (list, tuple, numpy.ndarray), 'create_global_var'
-    )
-    for item in shape:
-        check_type(
-            item,
-            'item of shape',
-            (
-                int,
-                numpy.uint8,
-                numpy.int8,
-                numpy.int16,
-                numpy.int32,
-                numpy.int64,
-            ),
-            'create_global_var',
-        )
-
-    check_dtype(
-        dtype,
-        'dtype',
-        [
-            'bool',
-            'float16',
-            'float32',
-            'float64',
-            'int8',
-            'int16',
-            'int32',
-            'int64',
-            'uint8',
-            'uint16',
-        ],
-        'create_global_var',
-    )
-
-    helper = LayerHelper("global_var", **locals())
-    var = helper.create_global_variable(
-        dtype=dtype,
-        shape=shape,
-        persistable=persistable,
-        name=name,
-        stop_gradient=True,
-    )
-    helper.set_variable_initializer(
-        var, initializer=Constant(value=float(value), force_cpu=force_cpu)
-    )
-
-    return var
-
-
 def cast(x, dtype):
     """
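The deletion above removes create_global_var from fluid.layers.tensor and from the module's __all__; the body reappears almost verbatim in python/paddle/tensor/creation.py later in this diff. On a build that includes this commit, the old attribute should simply be gone, as this sketch illustrates:

import paddle.fluid as fluid

# Sketch: the fluid.layers entry point no longer exists after this commit.
try:
    fluid.layers.create_global_var
except AttributeError:
    print("create_global_var has moved to paddle.static")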
python/paddle/fluid/optimizer.py

@@ -418,7 +418,7 @@ class Optimizer:
         else:
             self._learning_rate_map[
                 framework.default_main_program()
-            ] = layers.create_global_var(
+            ] = paddle.static.create_global_var(
                 name=unique_name.generate("learning_rate"),
                 shape=[1],
                 value=float(self._learning_rate),

@@ -449,7 +449,7 @@ class Optimizer:
         # create learning rate in the current main program
         self._learning_rate_map[
             framework.default_main_program()
-        ] = layers.create_global_var(
+        ] = paddle.static.create_global_var(
             name=unique_name.generate("learning_rate"),
             shape=[1],
             value=float(self._learning_rate),

@@ -474,6 +474,7 @@ class Optimizer:
         Examples:
             .. code-block:: python

+                import paddle
                 import paddle.fluid as fluid
                 import paddle

@@ -497,7 +498,7 @@ class Optimizer:
                 # set learning rate manually by framework Variable
-                lr_var = fluid.layers.create_global_var(
+                lr_var = paddle.static.create_global_var(
                     shape=[1], value=0.7, dtype='float32')
                 adam.set_lr(lr_var)
                 lr = adam.current_step_lr()

@@ -1498,7 +1499,7 @@ class SGDOptimizer(Optimizer):
             var_name = param.name + "_fp32_master"
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,

@@ -1859,7 +1860,7 @@ class LarsMomentumOptimizer(Optimizer):
             var_name = param.name + '_fp32_master'
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,

@@ -2267,21 +2268,21 @@ class AdamOptimizer(Optimizer):
            def get_decayed_betas(beta1_init, beta2_init, decay_steps, decay_rate, epsilon_init):
                global_step = lr_scheduler._decay_step_counter()
-               beta1 = fluid.layers.create_global_var(
+               beta1 = paddle.static.create_global_var(
                    shape=[1],
                    value=float(beta1_init),
                    dtype='float32',
                    # set persistable for save checkpoints and resume
                    persistable=True,
                    name="beta1")
-               beta2 = fluid.layers.create_global_var(
+               beta2 = paddle.static.create_global_var(
                    shape=[1],
                    value=float(beta2_init),
                    dtype='float32',
                    # set persistable for save checkpoints and resume
                    persistable=True,
                    name="beta2")
-               epsilon = fluid.layers.create_global_var(
+               epsilon = paddle.static.create_global_var(
                    shape=[1],
                    value=float(epsilon_init),
                    dtype='float32',

@@ -4326,7 +4327,7 @@ class ExponentialMovingAverage:
     def _get_ema_decay(self):
         with default_main_program()._lr_schedule_guard():
-            decay_var = layers.tensor.create_global_var(
+            decay_var = paddle.static.create_global_var(
                 shape=[1],
                 value=self._decay,
                 dtype='float32',

@@ -4346,7 +4347,7 @@ class ExponentialMovingAverage:
         return decay_var

     def _get_decay_pow(self, block):
-        global_step = layers.create_global_var(
+        global_step = paddle.static.create_global_var(
             name=self._step_counter_name,
             shape=[1],
             value=0,

@@ -4359,7 +4360,7 @@ class ExponentialMovingAverage:
         return decay_pow_acc, global_step

     def _create_ema_vars(self, param):
-        param_ema = layers.create_global_var(
+        param_ema = paddle.static.create_global_var(
             name=unique_name.generate(self._name + param.name + '_ema'),
             shape=param.shape,
             value=0.0,

@@ -7273,7 +7274,7 @@ class LookaheadOptimizer:
         with framework.program_guard(main_block.program, startup_program):
             # Add Var k to main prog and startup prog
-            k = layers.create_global_var(
+            k = paddle.static.create_global_var(
                 name="lookahead_k",
                 shape=[1],
                 value=int(self.k),

@@ -7282,7 +7283,7 @@ class LookaheadOptimizer:
             )

             # Add Var alpha to main prog and startup prog
-            alpha = layers.create_global_var(
+            alpha = paddle.static.create_global_var(
                 name="lookahead_alpha",
                 shape=[1],
                 value=float(self.alpha),

@@ -7291,7 +7292,7 @@ class LookaheadOptimizer:
             )

             # Add Var step
-            step = layers.create_global_var(
+            step = paddle.static.create_global_var(
                 name="lookahead_step",
                 shape=[1],
                 value=int(0),

@@ -7498,7 +7499,7 @@ class GradientMergeOptimizer:
     def _get_gm_cond_var(self, main_block):
         # Add const var
-        k_step_var = layers.create_global_var(
+        k_step_var = paddle.static.create_global_var(
             name="gradient_merge_k",
             shape=[1],
             value=int(self.k_steps),

@@ -7507,7 +7508,7 @@ class GradientMergeOptimizer:
             force_cpu=True,
         )

-        zero_var = layers.create_global_var(
+        zero_var = paddle.static.create_global_var(
             name="gradient_merge_zero",
             shape=[1],
             value=int(0),

@@ -7517,7 +7518,7 @@ class GradientMergeOptimizer:
         )

         # Add step var & cond var
-        step_var = layers.create_global_var(
+        step_var = paddle.static.create_global_var(
             name="gradient_merge_step",
             shape=[1],
             value=int(0),
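The AdamOptimizer docstring above keeps beta1, beta2, and epsilon as persistable global variables so their decayed values survive checkpoint save and resume. A condensed, runnable sketch of just that setup (values are illustrative):

import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    beta1 = paddle.static.create_global_var(
        shape=[1], value=0.9, dtype='float32',
        # set persistable for save checkpoints and resume
        persistable=True, name='beta1'
    )
    beta2 = paddle.static.create_global_var(
        shape=[1], value=0.999, dtype='float32',
        persistable=True, name='beta2'
    )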
python/paddle/fluid/tests/test_python_operator_overriding.py

@@ -19,7 +19,6 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
-import paddle.fluid.layers as layers

 paddle.enable_static()

@@ -32,10 +31,10 @@ class TestPythonOperatorOverride(unittest.TestCase):
         y_data = np.random.random(size=shape).astype(dtype)
         python_out = fn(x_data, y_data)

-        x_var = layers.create_global_var(
+        x_var = paddle.static.create_global_var(
             name='x', shape=shape, value=0.0, dtype=dtype, persistable=True
         )
-        y_var = layers.create_global_var(
+        y_var = paddle.static.create_global_var(
             name='y', shape=shape, value=0.0, dtype=dtype, persistable=True
         )
         out = fn(x_var, y_var)
python/paddle/fluid/tests/unittests/dist_transformer.py

@@ -289,7 +289,7 @@ class LearningRateScheduler:
         self.warmup_steps = warmup_steps
         self.d_model = d_model
         self.static_lr = learning_rate
-        self.learning_rate = layers.create_global_var(
+        self.learning_rate = paddle.static.create_global_var(
             name=name,
             shape=[1],
             value=float(learning_rate),
python/paddle/fluid/tests/unittests/dygraph_to_static/test_bmn.py

@@ -308,7 +308,7 @@ def bmn_loss_func(
         ]
         bm_mask.append(mask_vector)
     bm_mask = np.array(bm_mask, dtype=np.float32)
-    self_bm_mask = fluid.layers.create_global_var(
+    self_bm_mask = paddle.static.create_global_var(
         shape=[dscale, tscale], value=0, dtype=DATATYPE, persistable=True
     )
     fluid.layers.assign(bm_mask, self_bm_mask)
python/paddle/fluid/tests/unittests/npu/test_adam_op_npu.py

@@ -354,21 +354,21 @@ class TestNetWithEpsilonTensor(unittest.TestCase):
         beta2_init = 0.999
         epsilon_init = 1e-8
         if use_tensor:
-            beta1 = fluid.layers.create_global_var(
+            beta1 = paddle.static.create_global_var(
                 shape=[1],
                 value=float(beta1_init),
                 dtype='float32',
                 persistable=True,
                 name="beta1",
             )
-            beta2 = fluid.layers.create_global_var(
+            beta2 = paddle.static.create_global_var(
                 shape=[1],
                 value=float(beta2_init),
                 dtype='float32',
                 persistable=True,
                 name="beta2",
             )
-            epsilon = fluid.layers.create_global_var(
+            epsilon = paddle.static.create_global_var(
                 shape=[1],
                 value=float(epsilon_init),
                 dtype='float32',
python/paddle/fluid/tests/unittests/test_adam_op.py

@@ -616,10 +616,10 @@ class TestAdamOpV2(unittest.TestCase):
             conv = fluid.layers.conv2d(data, 8, 3)
             loss = paddle.mean(conv)
-            beta1 = fluid.layers.create_global_var(
+            beta1 = paddle.static.create_global_var(
                 shape=[1], value=0.85, dtype='float32', persistable=True
             )
-            beta2 = fluid.layers.create_global_var(
+            beta2 = paddle.static.create_global_var(
                 shape=[1], value=0.95, dtype='float32', persistable=True
             )
             betas = [beta1, beta2]

@@ -711,7 +711,7 @@ class TestAdamOpV2(unittest.TestCase):
         cur_lr = adam.get_lr()
         assert lr == cur_lr
         with self.assertRaises(TypeError):
-            lr_var = paddle.fluid.layers.create_global_var(
+            lr_var = paddle.static.create_global_var(
                 shape=[1], value=lr, dtype='float32'
             )
             adam.set_lr(lr_var)

@@ -817,21 +817,21 @@ class TestAdamOptimizer(unittest.TestCase):
         beta2_init = 0.999
         epsilon_init = 1e-8
         if use_tensor:
-            beta1 = fluid.layers.create_global_var(
+            beta1 = paddle.static.create_global_var(
                 shape=[1],
                 value=float(beta1_init),
                 dtype='float32',
                 persistable=True,
                 name="beta1",
             )
-            beta2 = fluid.layers.create_global_var(
+            beta2 = paddle.static.create_global_var(
                 shape=[1],
                 value=float(beta2_init),
                 dtype='float32',
                 persistable=True,
                 name="beta2",
             )
-            epsilon = fluid.layers.create_global_var(
+            epsilon = paddle.static.create_global_var(
                 shape=[1],
                 value=float(epsilon_init),
                 dtype='float32',
python/paddle/fluid/tests/unittests/test_adamw_op.py

@@ -212,10 +212,10 @@ class TestAdamWOp(unittest.TestCase):
             conv = fluid.layers.conv2d(data, 8, 3)
             loss = paddle.mean(conv)
-            beta1 = fluid.layers.create_global_var(
+            beta1 = paddle.static.create_global_var(
                 shape=[1], value=0.85, dtype='float32', persistable=True
             )
-            beta2 = fluid.layers.create_global_var(
+            beta2 = paddle.static.create_global_var(
                 shape=[1], value=0.95, dtype='float32', persistable=True
             )
             betas = [beta1, beta2]
python/paddle/fluid/tests/unittests/test_create_global_var.py

@@ -16,7 +16,7 @@ import unittest
 import numpy as np

-import paddle.fluid as fluid
+import paddle
 from paddle.fluid import Program, program_guard

@@ -25,19 +25,19 @@ class TestCreateGlobalVarError(unittest.TestCase):
         with program_guard(Program(), Program()):

             def test_shape():
-                fluid.layers.create_global_var(1, 2.0, np.float32)
+                paddle.static.create_global_var(1, 2.0, np.float32)

             self.assertRaises(TypeError, test_shape)

             def test_shape_item():
-                fluid.layers.create_global_var([1.0, 2.0, 3.0], 2.0, 'float32')
+                paddle.static.create_global_var([1.0, 2.0, 3.0], 2.0, 'float32')

             self.assertRaises(TypeError, test_shape_item)

             # Since create_global_var support all dtype in convert_dtype().
             # Hence, assertRaises ValueError not TypeError.
             def test_dtype():
-                fluid.layers.create_global_var([1, 2, 3], 2.0, np.complex128)
+                paddle.static.create_global_var([1, 2, 3], 2.0, np.complex128)

             self.assertRaises(TypeError, test_dtype)
python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py

@@ -542,7 +542,7 @@ class PaddingRNNTestBase(unittest.TestCase):
             )
         )
-        self.learning_rate = fluid.layers.create_global_var(
+        self.learning_rate = paddle.static.create_global_var(
             name="learning_rate",
             shape=[1],
             value=1.0,
python/paddle/fluid/tests/unittests/test_fleet_metric.py

@@ -79,14 +79,14 @@ class TestFleetMetric(unittest.TestCase):
         train = fluid.Program()
         startup = fluid.Program()
         with fluid.program_guard(train, startup):
-            t = fluid.layers.create_global_var(
+            t = paddle.static.create_global_var(
                 shape=[1, 1],
                 value=1,
                 dtype='int64',
                 persistable=True,
                 force_cpu=True,
             )
-            t1 = fluid.layers.create_global_var(
+            t1 = paddle.static.create_global_var(
                 shape=[1, 1],
                 value=1,
                 dtype='int64',
python/paddle/fluid/tests/unittests/test_imperative_optimizer.py

@@ -595,7 +595,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
             lr = adam.current_step_lr()
             np.testing.assert_allclose(lr, lr_list[i], rtol=1e-06, atol=0.0)

-        lr_var = fluid.layers.create_global_var(
+        lr_var = paddle.static.create_global_var(
             shape=[1], value=0.7, dtype='float32'
         )
         adam.set_lr(lr_var)
...
python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py
浏览文件 @
5c64d84f
...
@@ -721,7 +721,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
...
@@ -721,7 +721,7 @@ class TestOptimizerLearningRate(unittest.TestCase):
np
.
testing
.
assert_allclose
(
lr
,
lr_list
[
i
],
rtol
=
1e-06
,
atol
=
0.0
)
np
.
testing
.
assert_allclose
(
lr
,
lr_list
[
i
],
rtol
=
1e-06
,
atol
=
0.0
)
with
self
.
assertRaises
(
TypeError
):
with
self
.
assertRaises
(
TypeError
):
lr_var
=
fluid
.
layers
.
create_global_var
(
lr_var
=
paddle
.
static
.
create_global_var
(
shape
=
[
1
],
value
=
0.7
,
dtype
=
'float32'
shape
=
[
1
],
value
=
0.7
,
dtype
=
'float32'
)
)
adam
.
set_lr
(
lr_var
)
adam
.
set_lr
(
lr_var
)
...
...
python/paddle/fluid/tests/unittests/test_lr_scheduler.py
浏览文件 @
5c64d84f
...
@@ -109,7 +109,7 @@ class TestReduceOnPlateauDecay:
...
@@ -109,7 +109,7 @@ class TestReduceOnPlateauDecay:
main_prog
=
paddle
.
static
.
Program
()
main_prog
=
paddle
.
static
.
Program
()
start_prog
=
paddle
.
static
.
Program
()
start_prog
=
paddle
.
static
.
Program
()
with
paddle
.
static
.
program_guard
(
main_prog
,
start_prog
):
with
paddle
.
static
.
program_guard
(
main_prog
,
start_prog
):
x
=
fluid
.
layers
.
create_global_var
(
x
=
paddle
.
static
.
create_global_var
(
[
1
],
1
,
'float32'
,
persistable
=
True
[
1
],
1
,
'float32'
,
persistable
=
True
)
)
paddle
.
increment
(
x
)
paddle
.
increment
(
x
)
...
...
python/paddle/fluid/tests/unittests/test_parallel_executor_feed_persistable_var.py

@@ -39,7 +39,7 @@ class TestFeedPersistableVar(unittest.TestCase):
         }

     def optimizer(self):
-        learning_rate = fluid.layers.create_global_var(
+        learning_rate = paddle.static.create_global_var(
             name="learning_rate",
             shape=[1],
             value=1.0,
python/paddle/fluid/tests/unittests/test_queue.py

@@ -16,9 +16,9 @@ import unittest
 import numpy as np

+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
-import paddle.fluid.layers as layers


 class TestQueue(unittest.TestCase):

@@ -31,14 +31,14 @@ class TestQueue(unittest.TestCase):
         startup_program = fluid.Program()
         value = np.random.rand(1)
         with fluid.program_guard(main_program, startup_program):
-            data_in = layers.create_global_var(
+            data_in = paddle.static.create_global_var(
                 shape=[2, 3],
                 value=value,
                 dtype="float32",
                 persistable=True,
                 name='var_in',
             )
-            data_out = layers.create_global_var(
+            data_out = paddle.static.create_global_var(
                 shape=[2, 3],
                 value=value - 1.0,
                 dtype="float32",
python/paddle/fluid/tests/unittests/test_switch.py

@@ -30,7 +30,7 @@ class TestSwitch(unittest.TestCase):
         two_var = layers.fill_constant(shape=[1], dtype='float32', value=2.0)
         three_var = layers.fill_constant(shape=[1], dtype='float32', value=3.0)

-        result = layers.create_global_var(
+        result = paddle.static.create_global_var(
            shape=[1], value=-1.0, dtype='float32', persistable=True
         )

@@ -71,7 +71,7 @@ class TestSwitchCaseError(unittest.TestCase):
                 shape=[1], dtype='float32', value=0.0
             )
-            result = layers.create_global_var(
+            result = paddle.static.create_global_var(
                shape=[1], value=-1.0, dtype='float32', persistable=True
             )
python/paddle/fluid/tests/unittests/test_var_info.py

@@ -20,7 +20,7 @@ import unittest
 import numpy as np

-import paddle.fluid as fluid
+import paddle


 class TestVarInfo(unittest.TestCase):

@@ -29,7 +29,7 @@ class TestVarInfo(unittest.TestCase):
     def test_var_info(self):
         """Testcase for get and set info for variable."""
         value = np.random.randn(1)
-        var = fluid.layers.create_global_var([1], value, "float32")
+        var = paddle.static.create_global_var([1], value, "float32")
         var._set_info("name", "test")
         ret = var._get_info("name")
         assert ret == "test"
python/paddle/fluid/tests/unittests/xpu/test_adamw_op_xpu.py

@@ -199,13 +199,13 @@ class XPUTestAdamwOp2(XPUOpTestWrapper):
             conv = fluid.layers.conv2d(data, 8, 3)
             loss = paddle.mean(conv)
-            beta1 = fluid.layers.create_global_var(
+            beta1 = paddle.static.create_global_var(
                 shape=[1],
                 value=0.85,
                 dtype=self.in_type_str,
                 persistable=True,
             )
-            beta2 = fluid.layers.create_global_var(
+            beta2 = paddle.static.create_global_var(
                 shape=[1],
                 value=0.95,
                 dtype=self.in_type_str,
python/paddle/incubate/optimizer/distributed_fused_lamb.py

@@ -15,7 +15,7 @@
 import os
 import paddle
-from paddle.fluid import core, framework, layers, unique_name
+from paddle.fluid import core, framework, unique_name
 from paddle.fluid.clip import ClipGradByGlobalNorm
 from paddle.fluid.executor import global_scope
 from paddle.fluid.framework import Variable, name_scope

@@ -172,7 +172,7 @@ class DistributedFusedLamb(Optimizer):
     def _create_scale_from_constant(self, value):
         name = unique_name.generate('global_scale')
-        return layers.create_global_var(
+        return paddle.static.create_global_var(
             name=name,
             shape=[1],
             dtype='float32',
python/paddle/incubate/optimizer/lookahead.py

@@ -13,7 +13,7 @@
 # limitations under the License.

 import paddle
-from paddle.fluid import framework, layers, unique_name
+from paddle.fluid import framework, unique_name
 from paddle.fluid.dygraph import base as imperative_base
 from paddle.fluid.framework import Variable
 from paddle.fluid.layer_helper import LayerHelper

@@ -192,7 +192,7 @@ class LookAhead(Optimizer):
     def _increment_global_var(self):
         if self._global_step_var is None:
-            self._global_step_var = layers.create_global_var(
+            self._global_step_var = paddle.static.create_global_var(
                 name=unique_name.generate("lookahead_step"),
                 shape=[1],
                 value=0,

@@ -212,7 +212,7 @@ class LookAhead(Optimizer):
         zero_var = paddle.zeros(
             shape=[1], dtype='int32', name='lookahead_zeros'
         )
-        k_var = layers.create_global_var(
+        k_var = paddle.static.create_global_var(
             name=unique_name.generate("lookahead_k"),
             shape=[1],
             value=self.k,
python/paddle/optimizer/adam.py

@@ -18,7 +18,7 @@ from collections import defaultdict
 import paddle
 from paddle import _C_ops, _legacy_C_ops

-from ..fluid import core, framework, layers, unique_name
+from ..fluid import core, framework, unique_name
 from ..fluid.dygraph import base as imperative_base
 from ..fluid.framework import Variable, in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper

@@ -233,7 +233,7 @@ class Adam(Optimizer):
             var_name = param.name + "_fp32_master"
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,
...
python/paddle/optimizer/adamw.py
浏览文件 @
5c64d84f
...
@@ -19,7 +19,7 @@ from collections.abc import Callable
...
@@ -19,7 +19,7 @@ from collections.abc import Callable
import
paddle
import
paddle
from
..
import
_C_ops
,
_legacy_C_ops
from
..
import
_C_ops
,
_legacy_C_ops
from
..fluid
import
core
,
framework
,
layers
,
unique_name
from
..fluid
import
core
,
framework
,
unique_name
from
..fluid.clip
import
GradientClipBase
from
..fluid.clip
import
GradientClipBase
from
..fluid.dygraph
import
base
as
imperative_base
from
..fluid.dygraph
import
base
as
imperative_base
from
..fluid.framework
import
Parameter
,
Variable
from
..fluid.framework
import
Parameter
,
Variable
...
@@ -338,7 +338,7 @@ class AdamW(Optimizer):
...
@@ -338,7 +338,7 @@ class AdamW(Optimizer):
var_name
=
param
.
name
+
"_fp32_master"
var_name
=
param
.
name
+
"_fp32_master"
var_name
=
unique_name
.
generate
(
var_name
)
var_name
=
unique_name
.
generate
(
var_name
)
var
=
layers
.
create_global_var
(
var
=
paddle
.
static
.
create_global_var
(
name
=
var_name
,
name
=
var_name
,
shape
=
param
.
shape
,
shape
=
param
.
shape
,
value
=
0
,
value
=
0
,
...
...
python/paddle/optimizer/lamb.py

@@ -12,10 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+import paddle
 from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.executor import global_scope

-from ..fluid import core, framework, layers, unique_name
+from ..fluid import core, framework, unique_name
 from ..fluid.framework import Variable
 from ..fluid.layer_helper import LayerHelper
 from .optimizer import Optimizer

@@ -162,7 +163,7 @@ class Lamb(Optimizer):
             var_name = param.name + "_fp32_master"
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,
python/paddle/optimizer/momentum.py

@@ -19,7 +19,7 @@ from paddle import _C_ops, _legacy_C_ops
 from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
 from paddle.fluid.regularizer import L2DecayRegularizer

-from ..fluid import core, framework, layers, unique_name
+from ..fluid import core, framework, unique_name
 from ..fluid.layer_helper import LayerHelper
 from .optimizer import Optimizer

@@ -209,7 +209,7 @@ class Momentum(Optimizer):
             var_name = param.name + "_fp32_master"
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,
python/paddle/optimizer/optimizer.py

@@ -31,7 +31,7 @@ from paddle.fluid.framework import (
     name_scope,
 )

-from ..fluid import framework, layers, unique_name
+from ..fluid import framework, unique_name
 from ..fluid.backward import _get_no_grad_set_name, append_backward
 from ..fluid.clip import (
     GradientClipBase,

@@ -469,7 +469,7 @@ class Optimizer:
         else:
             self._learning_rate_map[
                 framework.default_main_program()
-            ] = layers.create_global_var(
+            ] = paddle.static.create_global_var(
                 name=unique_name.generate("learning_rate"),
                 shape=[1],
                 value=float(self._learning_rate),
python/paddle/optimizer/sgd.py

@@ -14,9 +14,10 @@
 import warnings

+import paddle
 from paddle import _C_ops, _legacy_C_ops

-from ..fluid import core, framework, layers, unique_name
+from ..fluid import core, framework, unique_name
 from ..fluid.dygraph import no_grad
 from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
 from ..fluid.layer_helper import LayerHelper

@@ -101,7 +102,7 @@ class SGD(Optimizer):
             var_name = param.name + "_fp32_master"
             var_name = unique_name.generate(var_name)
-            var = layers.create_global_var(
+            var = paddle.static.create_global_var(
                 name=var_name,
                 shape=param.shape,
                 value=0,
python/paddle/static/__init__.py

@@ -33,6 +33,7 @@ from .input import data  # noqa: F401
 from .input import InputSpec  # noqa: F401
 from ..tensor.creation import create_parameter  # noqa: F401
+from ..tensor.creation import create_global_var  # noqa: F401
 from ..fluid.executor import Executor  # noqa: F401
 from ..fluid.executor import global_scope  # noqa: F401

@@ -70,7 +71,6 @@ from ..fluid.io import load_vars  # noqa: F401
 from ..fluid.io import save_vars  # noqa: F401
 from ..fluid.io import batch  # noqa: F401
-from ..fluid.layers import create_global_var  # noqa: F401
 from ..fluid.contrib.layers import ctr_metric_bundle  # noqa: F401
 from ..fluid.layers import exponential_decay  # noqa: F401
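With the import swap above, paddle.static re-exports the relocated implementation. A quick sanity sketch, assuming a build that includes this commit (the module path is taken from the diff):

import paddle
from paddle.tensor import creation

# The static-namespace symbol now comes from paddle.tensor.creation,
# not from paddle.fluid.layers.
assert paddle.static.create_global_var is creation.create_global_var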
python/paddle/tensor/creation.py

@@ -36,7 +36,7 @@ from ..fluid.framework import (
     _in_legacy_dygraph,
     device_guard,
 )
-from ..fluid.initializer import Initializer
+from ..fluid.initializer import Constant, Initializer
 from ..fluid.layers import utils
 from ..fluid.param_attr import ParamAttr
 from ..framework import (

@@ -70,6 +70,84 @@ def _real_to_complex_dtype(dtype):
     return dtype


+def create_global_var(
+    shape, value, dtype, persistable=False, force_cpu=False, name=None
+):
+    """
+    This function creates a new tensor variable with value in the global block(block 0).
+
+    Args:
+        shape (list[int]|tuple[int]): Shape of the variable
+        value (float): The value of the variable. The new created
+            variable will be filled with it.
+        dtype (str): Data type of the variable
+        persistable (bool, optional): If this variable is persistable.
+            Default: False
+        force_cpu (bool, optional): Force this variable to be on CPU.
+            Default: False
+        name (str, optional): For detailed information, please refer to
+            :ref:`api_guide_Name` . Usually name is no need to set and None by default.
+
+    Returns:
+        Variable: The created Variable
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+            paddle.enable_static()
+            var = paddle.static.create_global_var(shape=[2,3], value=1.0, dtype='float32',
+                                                  persistable=True, force_cpu=True, name='new_var')
+    """
+    check_type(
+        shape, 'shape', (list, tuple, np.ndarray), 'create_global_var'
+    )
+    for item in shape:
+        check_type(
+            item,
+            'item of shape',
+            (
+                int,
+                np.uint8,
+                np.int8,
+                np.int16,
+                np.int32,
+                np.int64,
+            ),
+            'create_global_var',
+        )
+
+    check_dtype(
+        dtype,
+        'dtype',
+        [
+            'bool',
+            'float16',
+            'float32',
+            'float64',
+            'int8',
+            'int16',
+            'int32',
+            'int64',
+            'uint8',
+            'uint16',
+        ],
+        'create_global_var',
+    )
+
+    helper = LayerHelper("global_var", **locals())
+    var = helper.create_global_variable(
+        dtype=dtype,
+        shape=shape,
+        persistable=persistable,
+        name=name,
+        stop_gradient=True,
+    )
+    helper.set_variable_initializer(
+        var, initializer=Constant(value=float(value), force_cpu=force_cpu)
+    )
+
+    return var
+
+
 def create_parameter(
     shape, dtype, name=None, attr=None, is_bias=False, default_initializer=None
 ):
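Beyond the docstring example, two properties of the relocated helper are worth noting: the variable is created in the global block (block 0) and is marked stop_gradient, exactly as the LayerHelper calls above specify. A small sketch:

import paddle

paddle.enable_static()
var = paddle.static.create_global_var(
    shape=[2, 3], value=1.0, dtype='float32', persistable=True, name='new_var'
)
# Created in block 0 of the default program, gradient-free by construction.
assert var.block.idx == 0
assert var.stop_gradient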