Unverified commit 4aacacb4 — Author: chentianyu03, Committer: GitHub

change paddle.fluid.layers.fill_constant to paddle.full in sample codes (#27993)

Parent 01335815
No related merge requests
@@ -2506,21 +2506,21 @@ def case(pred_fn_pairs, default=None, name=None):
 paddle.enable_static()
 
 def fn_1():
-    return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
+    return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
 
 def fn_2():
-    return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
+    return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
 
 def fn_3():
-    return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)
+    return paddle.full(shape=[3], dtype='int32', fill_value=3)
 
 main_program = paddle.static.default_startup_program()
 startup_program = paddle.static.default_main_program()
 with paddle.static.program_guard(main_program, startup_program):
-    x = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.3)
-    y = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.1)
-    z = paddle.fluid.layers.fill_constant(shape=[1], dtype='float32', value=0.2)
+    x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
+    y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
+    z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)
     pred_1 = paddle.less_than(z, x)    # true: 0.2 < 0.3
     pred_2 = paddle.less_than(x, y)    # false: 0.3 < 0.1
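
For context, a minimal runnable sketch of the updated ``case`` sample assembled from the new lines in this hunk. The Executor/CPUPlace run at the end and the swap back to the conventional main/startup program assignment are illustrative additions, not part of the diff:

import paddle

paddle.enable_static()

def fn_1():
    return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)

def fn_2():
    return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)

def fn_3():
    return paddle.full(shape=[3], dtype='int32', fill_value=3)

main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
with paddle.static.program_guard(main_program, startup_program):
    x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
    y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
    z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)
    pred_1 = paddle.less_than(z, x)  # True: 0.2 < 0.3
    pred_2 = paddle.less_than(x, y)  # False: 0.3 < 0.1 does not hold
    # pred_1 is the first predicate that holds, so fn_1 runs; fn_3 is the default.
    out = paddle.static.nn.case(
        pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)

exe = paddle.static.Executor(paddle.CPUPlace())
res = exe.run(main_program, fetch_list=[out])
print(res[0])  # [[1. 1.]] from fn_1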
@@ -3626,19 +3626,19 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
 paddle.enable_static()
 
 def fn_1():
-    return paddle.fluid.layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
+    return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
 
 def fn_2():
-    return paddle.fluid.layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
+    return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
 
 def fn_3():
-    return paddle.fluid.layers.fill_constant(shape=[3], dtype='int32', value=3)
+    return paddle.full(shape=[3], dtype='int32', fill_value=3)
 
 main_program = paddle.static.default_startup_program()
 startup_program = paddle.static.default_main_program()
 with paddle.static.program_guard(main_program, startup_program):
-    index_1 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
-    index_2 = paddle.fluid.layers.fill_constant(shape=[1], dtype='int32', value=2)
+    index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
+    index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)
 
     out_1 = paddle.static.nn.switch_case(
         branch_index=index_1,
......
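
Similarly, a minimal sketch of the updated ``switch_case`` sample under the same assumptions (the Executor run and the chosen branch_fns mapping are illustrative, not part of the diff):

import paddle

paddle.enable_static()

def fn_1():
    return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)

def fn_2():
    return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)

def fn_3():
    return paddle.full(shape=[3], dtype='int32', fill_value=3)

main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
with paddle.static.program_guard(main_program, startup_program):
    index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
    # branch_fns maps integer keys to branches; index_1 == 1 selects fn_1.
    out_1 = paddle.static.nn.switch_case(
        branch_index=index_1,
        branch_fns={1: fn_1, 2: fn_2},
        default=fn_3)

exe = paddle.static.Executor(paddle.CPUPlace())
res = exe.run(main_program, fetch_list=[out_1])
print(res[0])  # [[1. 1.]] from fn_1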
@@ -312,7 +312,7 @@ def ones(shape, dtype=None, name=None):
 # [1 1]]
 
 # shape is a Tensor
-shape = paddle.fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
+shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
 data3 = paddle.ones(shape=shape, dtype='int32')
 # [[1 1]
 # [1 1]]
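
A standalone sketch of the updated ``ones`` sample; the imperative-mode calls (disable_static, .numpy()) are additions for illustration:

import paddle

paddle.disable_static()  # run imperatively so the result can be printed directly
shape = paddle.full(shape=[2], dtype='int32', fill_value=2)  # Tensor holding [2, 2]
data3 = paddle.ones(shape=shape, dtype='int32')
print(data3.numpy())
# [[1 1]
#  [1 1]]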
@@ -393,7 +393,7 @@ def zeros(shape, dtype=None, name=None):
 # [0. 0.]]
 
 # shape is a Tensor
-shape = paddle.fluid.layers.fill_constant(shape=[2], dtype='int32', value=2)
+shape = paddle.full(shape=[2], dtype='int32', fill_value=2)
 data3 = paddle.zeros(shape=shape, dtype='int32')
 # [[0 0]
 # [0 0]]
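
And a ``zeros`` counterpart, again as an imperative-mode sketch; passing a shape list that mixes a Python int with a 1-D Tensor is an assumption modelled on the ``full`` sample in the next hunk:

import paddle

paddle.disable_static()
# A 1-D Tensor can stand in for one dimension inside the shape list.
cols = paddle.full(shape=[1], dtype='int32', fill_value=2)
data = paddle.zeros(shape=[3, cols], dtype='float32')
print(data.numpy())
# [[0. 0.]
#  [0. 0.]
#  [0. 0.]]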
@@ -521,18 +521,18 @@ def full(shape, fill_value, dtype=None, name=None):
 # [0]]
 
 # attr shape is a list which contains Tensor.
-positive_2 = paddle.fluid.layers.fill_constant([1], "int32", 2)
+positive_2 = paddle.full([1], 2, "int32")
 data3 = paddle.full(shape=[1, positive_2], dtype='float32', fill_value=1.5)
 # [[1.5 1.5]]
 
 # attr shape is a Tensor.
-shape = paddle.fluid.layers.fill_constant([2], "int32", 2)
+shape = paddle.full([2], 2, "int32")
 data4 = paddle.full(shape=shape, dtype='bool', fill_value=True)
 # [[True True]
 # [True True]]
 
 # attr fill_value is a Tensor.
-val = paddle.fluid.layers.fill_constant([1], "float32", 2.0)
+val = paddle.full([1], 2.0, "float32")
 data5 = paddle.full(shape=[2,1], fill_value=val, dtype='float32')
 # [[2.0]
 # [2.0]]
......
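
To illustrate why the substitution is a drop-in change, a small equivalence sketch comparing the old and new calls; it assumes ``paddle.fluid.layers.fill_constant`` still runs in imperative mode in this release, and the shape and value are arbitrary:

import numpy as np
import paddle

paddle.disable_static()
old = paddle.fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1.5)
new = paddle.full(shape=[2, 3], dtype='float32', fill_value=1.5)
# Both produce the same constant Tensor; only value= is renamed to fill_value=.
assert np.array_equal(old.numpy(), new.numpy())
print(new.numpy())
# [[1.5 1.5 1.5]
#  [1.5 1.5 1.5]]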
@@ -170,7 +170,7 @@ def pow(x, y, name=None):
 print(res.numpy()) # [1 4 9]
 
 # example 2: y is a Tensor
-y = paddle.fluid.layers.fill_constant(shape=[1], value=2, dtype='float32')
+y = paddle.full(shape=[1], fill_value=2, dtype='float32')
 res = paddle.pow(x, y)
 print(res.numpy()) # [1 4 9]
......
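
A standalone sketch of the updated ``pow`` sample; the definition of ``x`` as a float32 Tensor is an assumption (the diff only shows the exponent line), so the printed values are floats rather than the ints shown in the comments above:

import paddle

paddle.disable_static()
x = paddle.to_tensor([1.0, 2.0, 3.0], dtype='float32')
# The exponent can be a Tensor; paddle.full now builds it instead of fill_constant.
y = paddle.full(shape=[1], fill_value=2, dtype='float32')
res = paddle.pow(x, y)
print(res.numpy())  # [1. 4. 9.]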
@@ -431,8 +431,8 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None):
 # example 2:
 # attr shape is a list which contains Tensor.
-dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 2)
-dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 3)
+dim_1 = paddle.full([1], 2, "int64")
+dim_2 = paddle.full([1], 3, "int32")
 result_2 = paddle.tensor.random.uniform(shape=[dim_1, dim_2])
 # [[-0.9951253, 0.30757582, 0.9899647 ],
 #  [ 0.5864527, 0.6607096, -0.8886161 ]]
......
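
Finally, a standalone sketch of the updated ``uniform`` sample; the fixed seed is an assumption added only to make the draw repeatable, and the sampled values will differ from the ones in the comments above:

import paddle

paddle.disable_static()
paddle.seed(2021)  # assumed seed, only for repeatability
dim_1 = paddle.full([1], 2, "int64")
dim_2 = paddle.full([1], 3, "int32")
# Shape entries may be 1-D Tensors; the result has shape [2, 3].
result_2 = paddle.tensor.random.uniform(shape=[dim_1, dim_2])
print(result_2.shape)  # [2, 3]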