From e48767fe83a208f584dff417e718d7ae54b3bd67 Mon Sep 17 00:00:00 2001 From: LiYuRio <63526175+LiYuRio@users.noreply.github.com> Date: Fri, 2 Dec 2022 16:05:50 +0800 Subject: [PATCH] remove less_than (#48584) --- .../fleet/utils/hybrid_parallel_inference.py | 4 +- python/paddle/fluid/layers/control_flow.py | 71 ++----------------- python/paddle/fluid/tests/test_if_else_op.py | 8 +-- .../auto_parallel/test_while_op_partition.py | 4 +- .../fleet/hybrid_parallel_inference_helper.py | 4 +- .../tests/unittests/dist_fleet_simnet_bow.py | 2 +- .../fluid/tests/unittests/dist_transformer.py | 4 +- .../ir/test_ir_subgraph_python_interface.py | 2 +- .../tests/unittests/npu/test_while_op_npu.py | 4 +- .../test_standalone_controlflow.py | 2 +- .../fluid/tests/unittests/test_assert_op.py | 2 +- .../paddle/fluid/tests/unittests/test_case.py | 10 +-- .../fluid/tests/unittests/test_compare_op.py | 7 +- .../paddle/fluid/tests/unittests/test_cond.py | 2 +- .../tests/unittests/test_device_guard.py | 2 +- .../tests/unittests/test_dist_fleet_ps.py | 2 +- .../tests/unittests/test_dist_fleet_ps11.py | 2 +- .../tests/unittests/test_dist_fleet_ps12.py | 2 +- .../tests/unittests/test_dist_fleet_ps13.py | 2 +- .../tests/unittests/test_dist_fleet_ps2.py | 2 +- .../tests/unittests/test_dist_fleet_ps3.py | 2 +- .../tests/unittests/test_dist_fleet_ps4.py | 2 +- .../tests/unittests/test_dist_fleet_ps5.py | 2 +- .../tests/unittests/test_dist_fleet_ps6.py | 2 +- .../test_dynamic_rnn_stop_gradient.py | 4 +- .../unittests/test_eager_deletion_while_op.py | 8 +-- .../tests/unittests/test_imperative_basic.py | 2 +- .../test_ir_memory_optimize_ifelse_op.py | 2 +- .../fluid/tests/unittests/test_layers.py | 22 +++--- .../fluid/tests/unittests/test_profiler.py | 4 +- .../tests/unittests/test_program_code.py | 3 +- .../fluid/tests/unittests/test_switch.py | 7 +- .../unittests/test_tensor_array_to_tensor.py | 2 +- .../tests/unittests/test_while_loop_op.py | 24 +++---- .../fluid/tests/unittests/test_while_op.py | 10 +-- .../unittests/xpu/test_device_guard_xpu.py | 2 +- .../tests/unittests/xpu/test_while_op_xpu.py | 10 +-- .../paddle/jit/dy2static/convert_operators.py | 3 +- 38 files changed, 94 insertions(+), 155 deletions(-) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index cd1d05e913..49aed0862f 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -129,7 +129,7 @@ class HybridParallelInferenceHelper: data = layers.array_write(X, step_idx) cond_int = layers.fill_constant(shape=[1], dtype="int64", value=0, force_cpu=False, name="cond_int") - cond = layers.less_than(x=step_idx, y=max_len) + cond = paddle.less_than(x=step_idx, y=max_len) while_op = layers.While(cond, is_test=True) with while_op.block(): @@ -153,7 +153,7 @@ class HybridParallelInferenceHelper: layers.array_write(hidden2, i=step_idx, array=data) # update cond and assign to cond_int, we will sync cond_int - layers.less_than(x=step_idx, y=max_len, cond=cond) + paddle.assign(paddle.less_than(x=step_idx, y=max_len), cond) layers.assign(layers.cast(cond, dtype="int32"), cond_int) with paddle.fluid.device_guard(f'{device}:all'): diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 80f0830e22..cd49f94e03 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -56,7 +56,6 
@@ __all__ = [ 'Switch', 'increment', 'array_write', - 'less_than', 'array_read', 'cond', 'IfElse', @@ -1214,11 +1213,11 @@ class While: loop_len = fluid.layers.fill_constant(shape=[1],dtype='int64', value=10) # loop length - cond = fluid.layers.less_than(x=i, y=loop_len) + cond = paddle.less_than(x=i, y=loop_len) while_op = fluid.layers.While(cond=cond) with while_op.block(): i = fluid.layers.increment(x=i, value=1, in_place=True) - fluid.layers.less_than(x=i, y=loop_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=loop_len), cond) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_startup_program()) @@ -1230,6 +1229,7 @@ class While: Examples 2: .. code-block:: python + import paddle import paddle.fluid as fluid import numpy as np @@ -1239,14 +1239,14 @@ class While: data = fluid.data(name='data', shape=[1], dtype='float32') sums = fluid.layers.fill_constant(shape=[1], dtype='float32', value=0) # Define the variable to be obtained ouside of While, which name should be different from the variable inside the While to be obtained - cond = fluid.layers.less_than(x=i, y=loop_len) + cond = paddle.less_than(x=i, y=loop_len) while_op = fluid.layers.While(cond=cond) with while_op.block(): sums_tensor = fluid.layers.elementwise_add(x=data, y=data) fluid.layers.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign i = fluid.layers.increment(x=i, value=1, in_place=True) data = fluid.layers.elementwise_add(x=data, y=one) - fluid.layers.less_than(x=i, y=loop_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=loop_len), cond) feed_data = np.ones(1).astype('float32') exe = fluid.Executor(fluid.CPUPlace()) @@ -1748,64 +1748,6 @@ def array_write(x, i, array=None): return array -@templatedoc() -def less_than(x, y, force_cpu=None, cond=None, name=None): - """ - - ${comment} - - Args: - x(Tensor): ${x_comment}. - y(Tensor): ${y_comment}. - force_cpu(${force_cpu_type}): ${force_cpu_comment}. - cond(Tensor, optional): Optional output which can be any created Tensor - that meets the requirements to store the result of *less_than*. - if cond is None, a new Tensor will be created to store the result. - name(str, optional): The default value is None. Normally there is no need for - user to set this property. For more information, please refer to :ref:`api_guide_Name`. - Returns: - ${out_comment}. - - Examples: - .. 
code-block:: python - - import paddle - - x = paddle.to_tensor([1, 2, 3, 4], dtype='float32') - y = paddle.to_tensor([2, 2, 1, 3], dtype='float32') - result = paddle.less_than(x, y) - print(result) # [True, False, False, False] - - """ - check_variable_and_dtype( - x, "x", ["float32", "float64", "int32", "int64"], "less_than" - ) - check_variable_and_dtype( - y, "y", ["float32", "float64", "int32", "int64"], "less_than" - ) - if cond is not None: - check_type(cond, "cond", Variable, "less_than") - if force_cpu is not None: - check_type(force_cpu, "force_cpu", bool, "less_than") - - helper = LayerHelper("less_than", **locals()) - if cond is None: - cond = helper.create_variable_for_type_inference(dtype='bool') - cond.stop_gradient = True - - attrs = dict() - if force_cpu is not None: - attrs['force_cpu'] = force_cpu - - helper.append_op( - type='less_than', - inputs={'X': [x], 'Y': [y]}, - outputs={'Out': [cond]}, - attrs=attrs, - ) - return cond - - def array_read(array, i): """ This OP is used to read data at the specified position from the input array @@ -1932,8 +1874,9 @@ class ConditionalBlock: Examples: .. code-block:: python + import paddle import paddle.fluid as fluid - cond = layers.less_than(x=label, y=limit) + cond = paddle.less_than(x=label, y=limit) true_image, false_image = layers.split_lod_tensor( input=image, mask=cond) true_cond = layers.ConditionalBlock([true_image]) diff --git a/python/paddle/fluid/tests/test_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py index 4a7f213465..1eba6cbb60 100644 --- a/python/paddle/fluid/tests/test_if_else_op.py +++ b/python/paddle/fluid/tests/test_if_else_op.py @@ -43,7 +43,7 @@ class TestMNISTIfElseOp(unittest.TestCase): label = layers.data(name='y', shape=[1], dtype='int64') limit = layers.fill_constant(shape=[1], dtype='int64', value=5) - cond = layers.less_than(x=label, y=limit) + cond = paddle.less_than(x=label, y=limit) true_image, false_image = split_lod_tensor(input=image, mask=cond) true_out = layers.create_tensor(dtype='float32') @@ -105,7 +105,7 @@ class TestMNISTIfElseOp(unittest.TestCase): label = layers.data(name='y', shape=[1], dtype='int64') limit = layers.fill_constant(shape=[1], dtype='int64', value=5) - cond = layers.less_than(x=label, y=limit) + cond = paddle.less_than(x=label, y=limit) ie = layers.IfElse(cond) with ie.true_block(): @@ -174,7 +174,7 @@ class TestIfElse(unittest.TestCase): cond = layers.fill_constant( [1], dtype='float32', value=self.cond_value ) - ifcond = layers.less_than(x=src, y=cond) + ifcond = paddle.less_than(x=src, y=cond) ie = layers.IfElse(ifcond) with ie.true_block(): true_target = ie.input(src) @@ -237,7 +237,7 @@ class TestIfElseError(unittest.TestCase): const_value = layers.fill_constant( [1], dtype='float32', value=123.0 ) - ifcond = layers.less_than(x=src, y=const_value) + ifcond = paddle.less_than(x=src, y=const_value) with self.assertRaises(TypeError): ie = layers.IfElse(set()) with self.assertRaises(TypeError): diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py index 8825abe332..fcfd783f71 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py @@ -171,7 +171,7 @@ def get_program(): # "dims_mapping": [-1, -1, -1] # }) - cond = fluid.layers.less_than(x=i, y=loop_len) + cond = paddle.less_than(x=i, y=loop_len) auto.shard_tensor(cond, 
_g_process_mesh, [None]) while_op = fluid.layers.While(cond=cond) @@ -191,7 +191,7 @@ def get_program(): # 更新循环条件 i = fluid.layers.increment(x=i, value=1, in_place=True) fluid.layers.array_write(cur_pred, array=input_array, i=i) - fluid.layers.less_than(x=i, y=loop_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=loop_len), cond) end_pred = fluid.layers.array_read(array=input_array, i=i) auto.shard_tensor(end_pred, _g_process_mesh, [None, None, None]) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py index 2cb6f3326e..542b1ba637 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_inference_helper.py @@ -83,7 +83,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): name="cond_int", ) print(cond_int.shape) - cond = layers.less_than(x=step_idx, y=max_len) + cond = paddle.less_than(x=step_idx, y=max_len) while_op = layers.While(cond, is_test=True) with while_op.block(): @@ -119,7 +119,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase): layers.array_write(hidden2, i=step_idx, array=data) # update cond and assign to cond_int, we will sync cond_int - layers.less_than(x=step_idx, y=max_len, cond=cond) + paddle.assign(paddle.less_than(x=step_idx, y=max_len), cond) layers.assign(layers.cast(cond, dtype="int32"), cond_int) with paddle.fluid.device_guard(f'{device}:all'): diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py index 2c16fff90a..be2ea401ea 100644 --- a/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py +++ b/python/paddle/fluid/tests/unittests/dist_fleet_simnet_bow.py @@ -54,7 +54,7 @@ def fake_simnet_reader(): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py index 52b6f674e5..7106c426bc 100644 --- a/python/paddle/fluid/tests/unittests/dist_transformer.py +++ b/python/paddle/fluid/tests/unittests/dist_transformer.py @@ -1762,7 +1762,7 @@ def fast_decode( step_idx = layers.fill_constant( shape=[1], dtype=start_tokens.dtype, value=0 ) - cond = layers.less_than(x=step_idx, y=max_len) + cond = paddle.less_than(x=step_idx, y=max_len) while_op = layers.While(cond) # array states will be stored for each step. 
ids = layers.array_write( @@ -1861,7 +1861,7 @@ def fast_decode( for i in range(n_layer): layers.assign(pre_caches[i]["k"], caches[i]["k"]) layers.assign(pre_caches[i]["v"], caches[i]["v"]) - length_cond = layers.less_than(x=step_idx, y=max_len) + length_cond = paddle.less_than(x=step_idx, y=max_len) finish_cond = paddle.logical_not(layers.is_empty(x=selected_ids)) paddle.logical_and(x=length_cond, y=finish_cond, out=cond) diff --git a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py index 2b4577a618..2a7ebae071 100644 --- a/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py +++ b/python/paddle/fluid/tests/unittests/ir/test_ir_subgraph_python_interface.py @@ -51,7 +51,7 @@ class TestQuantizationSubGraph(unittest.TestCase): with program_guard(main_program, startup_program): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) - pred = layers.less_than(y, x) + pred = paddle.less_than(y, x) out = layers.cond(pred, true_func, false_func) core_graph = core.Graph(main_program.desc) diff --git a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py index 1ff374fa9f..c63f11b859 100644 --- a/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py +++ b/python/paddle/fluid/tests/unittests/npu/test_while_op_npu.py @@ -73,7 +73,7 @@ class TestWhileOp(unittest.TestCase): i = layers.increment(x=i, in_place=True) layers.array_write(result, i=i, array=mem_array) - layers.less_than(x=i, y=array_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=array_len), cond) with while_op2.block(): d2 = layers.array_read(array=data_array, i=j) @@ -82,7 +82,7 @@ class TestWhileOp(unittest.TestCase): j = layers.increment(x=j, in_place=True) layers.array_write(result2, i=j, array=mem_array) - layers.less_than(x=j, y=array_len2, cond=cond2) + paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) sum_result = layers.array_read(array=mem_array, i=j) loss = paddle.mean(sum_result) return loss, sum_result diff --git a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py index 81c1049c5b..c989ff866e 100644 --- a/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py +++ b/python/paddle/fluid/tests/unittests/standalone_executor/test_standalone_controlflow.py @@ -56,7 +56,7 @@ class TestCompatibility(unittest.TestCase): with program_guard(main_program, startup_program): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) - pred = layers.less_than(x, y) + pred = paddle.less_than(x, y) out = layers.cond(pred, true_func, false_func) # out is a tuple containing 2 tensors return main_program, startup_program, out diff --git a/python/paddle/fluid/tests/unittests/test_assert_op.py b/python/paddle/fluid/tests/unittests/test_assert_op.py index a006f99928..d59194aef5 100644 --- a/python/paddle/fluid/tests/unittests/test_assert_op.py +++ b/python/paddle/fluid/tests/unittests/test_assert_op.py @@ -61,7 +61,7 @@ class TestAssertOp(unittest.TestCase): def net_func(): zero = layers.fill_constant(shape=[1], dtype='int64', value=0) one = layers.fill_constant(shape=[1], dtype='int64', value=1) - condition = layers.less_than(one, zero) 
# False + condition = paddle.less_than(one, zero) # False layers.Assert(condition, [zero, one]) print("test_assert_print_data") diff --git a/python/paddle/fluid/tests/unittests/test_case.py b/python/paddle/fluid/tests/unittests/test_case.py index 777db3a348..3ab6e983d9 100644 --- a/python/paddle/fluid/tests/unittests/test_case.py +++ b/python/paddle/fluid/tests/unittests/test_case.py @@ -42,8 +42,8 @@ class TestAPICase(unittest.TestCase): x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) - pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 - pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 + pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 + pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 # call fn_1 out_0 = layers.case( @@ -200,8 +200,8 @@ class TestAPICase_Nested(unittest.TestCase): x = layers.fill_constant(shape=[1], dtype='float32', value=0.3) y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) - pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 - pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 + pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 + pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 out_1 = layers.case( pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3 @@ -239,7 +239,7 @@ class TestAPICase_Error(unittest.TestCase): with program_guard(main_program, startup_program): x = layers.fill_constant(shape=[1], dtype='float32', value=0.23) z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) - pred_1 = layers.less_than(z, x) # true + pred_1 = paddle.less_than(z, x) # true # The type of 'pred_fn_pairs' in case must be list or tuple def type_error_pred_fn_pairs(): diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py index 8cce73391d..fa98771ce1 100755 --- a/python/paddle/fluid/tests/unittests/test_compare_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_op.py @@ -44,12 +44,7 @@ def create_test_class(op_type, typename, callback): x = fluid.layers.data(name='x', shape=[2], dtype='int32') y = fluid.layers.data(name='y', shape=[2], dtype='int32') a = fluid.layers.data(name='a', shape=[2], dtype='int16') - if self.op_type == "less_than": - self.assertRaises( - TypeError, fluid.layers.less_than, x=x, y=y, force_cpu=1 - ) op = eval("paddle.%s" % self.op_type) - self.assertRaises(TypeError, op, x=x, y=y, cond=1) self.assertRaises(TypeError, op, x=x, y=a) self.assertRaises(TypeError, op, x=a, y=y) @@ -481,7 +476,7 @@ class TestCompareOpPlace(unittest.TestCase): place = paddle.CUDAPlace(0) label = fluid.layers.assign(np.array([3, 3], dtype="int32")) limit = fluid.layers.assign(np.array([3, 2], dtype="int32")) - out = fluid.layers.less_than(label, limit, force_cpu=True) + out = paddle.less_than(label, limit) exe = fluid.Executor(place) (res,) = exe.run(fetch_list=[out]) self.assertEqual((res == np.array([False, False])).all(), True) diff --git a/python/paddle/fluid/tests/unittests/test_cond.py b/python/paddle/fluid/tests/unittests/test_cond.py index d31ac885b1..bc5a73d048 100644 --- a/python/paddle/fluid/tests/unittests/test_cond.py +++ b/python/paddle/fluid/tests/unittests/test_cond.py @@ -53,7 +53,7 @@ class TestCondInputOutput(unittest.TestCase): with program_guard(main_program, startup_program): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = 
layers.fill_constant(shape=[1], dtype='float32', value=0.23) - pred = layers.less_than(y, x) + pred = paddle.less_than(y, x) out = layers.cond(pred, true_func, false_func) # out is one tensor diff --git a/python/paddle/fluid/tests/unittests/test_device_guard.py b/python/paddle/fluid/tests/unittests/test_device_guard.py index 34a029e7ba..d62893de97 100644 --- a/python/paddle/fluid/tests/unittests/test_device_guard.py +++ b/python/paddle/fluid/tests/unittests/test_device_guard.py @@ -156,7 +156,7 @@ class TestDeviceGuard(unittest.TestCase): while_op = fluid.layers.While(cond=cond) with while_op.block(): i = paddle.increment(x=i, value=1) - fluid.layers.less_than(x=i, y=loop_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=loop_len), cond) warning = "The Op(while) is not support to set device." warning_num = get_vaild_warning_num(warning, w) diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py index a1e568f6a0..c641155d9f 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps.py @@ -36,7 +36,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py index e7ce16057e..0261df6670 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps11.py @@ -36,7 +36,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py index f97046db94..330d62cfa0 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps12.py @@ -39,7 +39,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py index ead8e6437a..687c8d06ad 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps13.py @@ -40,7 +40,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py index 1ea94b85bc..d75e16f777 100644 --- 
a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps2.py @@ -39,7 +39,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py index 0f2c840019..3b735d193b 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps3.py @@ -36,7 +36,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py index a7d4f06c02..d1fbfb8937 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps4.py @@ -36,7 +36,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py index 25bb1b0e37..e2e81a747a 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps5.py @@ -36,7 +36,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py index a8c26ed9b7..8e8eacece9 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py +++ b/python/paddle/fluid/tests/unittests/test_dist_fleet_ps6.py @@ -36,7 +36,7 @@ batch_size = 4 class TestPSPassWithBow(unittest.TestCase): def net(self): def get_acc(cos_q_nt, cos_q_pt, batch_size): - cond = fluid.layers.less_than(cos_q_nt, cos_q_pt) + cond = paddle.less_than(cos_q_nt, cos_q_pt) cond = fluid.layers.cast(cond, dtype='float64') cond_3 = paddle.sum(cond) acc = paddle.divide( diff --git a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py index a92052c050..9774ea32e4 100644 --- a/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_dynamic_rnn_stop_gradient.py @@ -36,7 +36,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): max_len = layers.fill_constant( shape=[1], dtype="int64", value=10, force_cpu=True ) - cond = layers.less_than(x=step_idx, y=max_len) + cond = 
paddle.less_than(x=step_idx, y=max_len) while_op = layers.While(cond) scores = layers.array_write(x, step_idx) with while_op.block(): @@ -53,7 +53,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False): score = paddle.gather_nd(x, topk_coordinates) layers.increment(x=step_idx, value=1.0, in_place=True) layers.array_write(score, i=step_idx, array=scores) - length_cond = layers.less_than(x=step_idx, y=max_len) + length_cond = paddle.less_than(x=step_idx, y=max_len) layers.assign(length_cond, cond) out = layers.tensor_array_to_tensor(scores, axis=0, use_stack=True)[0] diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py index d61e6a6f07..943642b857 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_while_op.py @@ -94,14 +94,14 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): array_len = layers.fill_constant(shape=[1], dtype='int64', value=1) array_len.stop_gradient = True - cond = layers.less_than(x=i, y=array_len) + cond = paddle.less_than(x=i, y=array_len) j = layers.fill_constant(shape=[1], dtype='int64', value=1) j.stop_gradient = True array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3) array_len2.stop_gradient = True - cond2 = layers.less_than(x=j, y=array_len2) + cond2 = paddle.less_than(x=j, y=array_len2) while_op = layers.While(cond=cond) while_op2 = layers.While(cond=cond2) @@ -114,7 +114,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): i = layers.increment(x=i, in_place=True) layers.array_write(result, i=i, array=mem_array) - layers.less_than(x=i, y=array_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=array_len), cond) with while_op2.block(): d2 = layers.array_read(array=data_array, i=j) prev2 = layers.array_read(array=mem_array, i=j) @@ -124,7 +124,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase): j = layers.increment(x=j, in_place=True) layers.array_write(result2, i=j, array=mem_array) - layers.less_than(x=j, y=array_len2, cond=cond2) + paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) sum_result = layers.array_read(array=mem_array, i=j) sum_result.persistable = True diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 4441098c94..a8aa34eb44 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -744,7 +744,7 @@ class TestImperative(unittest.TestCase): paddle.reshape(paddle.sum(inp_data2), [1, 1]), [4, -1], ) - cond = fluid.layers.less_than(x=a, y=b) + cond = paddle.less_than(x=a, y=b) ie = fluid.layers.IfElse(cond) with ie.true_block(): diff --git a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py index 595dd02e43..852f4e5503 100644 --- a/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py +++ b/python/paddle/fluid/tests/unittests/test_ir_memory_optimize_ifelse_op.py @@ -41,7 +41,7 @@ class TestIrMemoryOptimizeIfElseOp(unittest.TestCase): label = layers.data(name='y', shape=[1], dtype='int64') limit = layers.fill_constant(shape=[1], dtype='int64', value=5) - cond = layers.less_than(x=label, y=limit) + cond = paddle.less_than(x=label, y=limit) ie = layers.IfElse(cond) with ie.true_block(): diff --git 
a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 1597269b29..1fad291358 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -2423,7 +2423,7 @@ class TestLayer(LayerTest): ten = layers.fill_constant(shape=[1], dtype='int64', value=10) def cond(i): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body(i): return i + 1 @@ -2436,7 +2436,7 @@ class TestLayer(LayerTest): ten = layers.fill_constant(shape=[1], dtype='int64', value=10) def cond1(i): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body1(i): return i + 1 @@ -2464,7 +2464,7 @@ class TestLayer(LayerTest): with self.static_graph(): a = layers.data(name='a', shape=[1], dtype='int64') b = layers.data(name='b', shape=[1], dtype='int64') - cond = layers.less_than(x=a, y=b) + cond = paddle.less_than(x=a, y=b) static_ret = self.get_static_graph_result( feed={"a": value_a, "b": value_b}, fetch_list=[cond] )[0] @@ -2472,14 +2472,14 @@ class TestLayer(LayerTest): with _test_eager_guard(): da = base.to_variable(value_a) db = base.to_variable(value_b) - dcond = layers.less_than(x=da, y=db) + dcond = paddle.less_than(x=da, y=db) for i in range(len(static_ret)): self.assertTrue(dcond.numpy()[i] == static_ret[i]) da = base.to_variable(value_a) db = base.to_variable(value_b) - dcond = layers.less_than(x=da, y=db) + dcond = paddle.less_than(x=da, y=db) for i in range(len(static_ret)): self.assertTrue(dcond.numpy()[i] == static_ret[i]) @@ -2696,8 +2696,8 @@ class TestLayer(LayerTest): y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) - pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 - pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 + pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 + pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_3 = paddle.equal(x, y) # false: 0.3 == 0.1 out_1 = layers.case( @@ -2719,8 +2719,8 @@ class TestLayer(LayerTest): y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) - pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 - pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 + pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 + pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_3 = paddle.equal(x, y) # false: 0.3 == 0.1 out_1 = layers.case( @@ -2736,8 +2736,8 @@ class TestLayer(LayerTest): y = layers.fill_constant(shape=[1], dtype='float32', value=0.1) z = layers.fill_constant(shape=[1], dtype='float32', value=0.2) - pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3 - pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1 + pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3 + pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1 pred_3 = paddle.equal(x, y) # false: 0.3 == 0.1 out_1 = layers.case( diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index 3303481165..6b414afbe4 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -45,13 +45,13 @@ class TestProfiler(unittest.TestCase): ) until = layers.fill_constant([1], dtype='int64', value=10) data_arr = layers.array_write(hidden1, i) - cond = fluid.layers.less_than(x=counter, y=until) + cond = paddle.less_than(x=counter, y=until) while_op = fluid.layers.While(cond=cond) with while_op.block(): 
hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu') layers.array_write(hidden_n, i, data_arr) fluid.layers.increment(x=counter, value=1, in_place=True) - layers.less_than(x=counter, y=until, cond=cond) + paddle.assign(paddle.less_than(x=counter, y=until), cond) hidden_n = layers.array_read(data_arr, i) hidden2 = fluid.layers.fc(input=hidden_n, size=64, act='relu') diff --git a/python/paddle/fluid/tests/unittests/test_program_code.py b/python/paddle/fluid/tests/unittests/test_program_code.py index 449f97c22f..e60706794f 100644 --- a/python/paddle/fluid/tests/unittests/test_program_code.py +++ b/python/paddle/fluid/tests/unittests/test_program_code.py @@ -14,6 +14,7 @@ import unittest +import paddle import paddle.fluid as fluid import paddle.fluid.layers as layers @@ -44,7 +45,7 @@ class TestProgramToReadableCode(unittest.TestCase): with fluid.program_guard(program): x = layers.fill_constant(shape=[1], dtype='float32', value=0.1) y = layers.fill_constant(shape=[1], dtype='float32', value=0.23) - pred = layers.less_than(y, x) + pred = paddle.less_than(y, x) out = layers.cond(pred, true_func, false_func) def test_program_code(self): diff --git a/python/paddle/fluid/tests/unittests/test_switch.py b/python/paddle/fluid/tests/unittests/test_switch.py index 250d844a3f..5fe69ee420 100644 --- a/python/paddle/fluid/tests/unittests/test_switch.py +++ b/python/paddle/fluid/tests/unittests/test_switch.py @@ -14,6 +14,7 @@ import unittest +import paddle import paddle.fluid.core as core import paddle.fluid.framework as framework import paddle.fluid.layers as layers @@ -34,11 +35,11 @@ class TestSwitch(unittest.TestCase): ) with layers.Switch() as switch: - with switch.case(layers.less_than(x, zero_var)): + with switch.case(paddle.less_than(x, zero_var)): layers.assign(zero_var, result) - with switch.case(layers.less_than(x, one_var)): + with switch.case(paddle.less_than(x, one_var)): layers.assign(one_var, result) - with switch.case(layers.less_than(x, two_var)): + with switch.case(paddle.less_than(x, two_var)): layers.assign(two_var, result) with switch.default(): layers.assign(three_var, result) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py index 123a920af7..e662e1488c 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_array_to_tensor.py @@ -281,7 +281,7 @@ class TestTensorArrayToTensorAPI(unittest.TestCase): fluid.layers.array_write(x0, zero, array) def cond(i, end, array): - return fluid.layers.less_than(i, end) + return paddle.less_than(i, end) def body(i, end, array): prev = fluid.layers.array_read(array, i - 1) diff --git a/python/paddle/fluid/tests/unittests/test_while_loop_op.py b/python/paddle/fluid/tests/unittests/test_while_loop_op.py index d69f114f64..8e733ef920 100644 --- a/python/paddle/fluid/tests/unittests/test_while_loop_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_loop_op.py @@ -29,7 +29,7 @@ paddle.enable_static() class TestApiWhileLoop(unittest.TestCase): def test_var_tuple(self): def cond(i): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body(i): return paddle.add(x=i, y=one) @@ -55,7 +55,7 @@ class TestApiWhileLoop(unittest.TestCase): def test_var_list(self): def cond(i, mem): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body(i, mem): mem = paddle.add(x=mem, y=one) @@ -87,7 +87,7 @@ class 
TestApiWhileLoop(unittest.TestCase): def test_var_dict(self): def cond(i, ten, test_dict, test_list, test_list_dict): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body(i, ten, test_dict, test_list, test_list_dict): test_dict["test_key"] = i @@ -159,11 +159,11 @@ class TestApiWhileLoop(unittest.TestCase): class TestApiWhileLoop_Nested(unittest.TestCase): def test_nested_net(self): def external_cond(i, j, init, sums): - return layers.less_than(i, loop_len1) + return paddle.less_than(i, loop_len1) def external_body(i, j, init, sums): def internal_cond(j, init, sums): - return layers.less_than(j, loop_len2) + return paddle.less_than(j, loop_len2) def internal_body(j, init, sums): init = paddle.add(x=init, y=ones) @@ -219,7 +219,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase): class TestApiWhileLoop_Backward(unittest.TestCase): def test_while_loop_backward(self): def cond(i, x): - return layers.less_than(i, eleven) + return paddle.less_than(i, eleven) def body(i, x): x = paddle.multiply(x=i, y=i) @@ -307,11 +307,11 @@ class TestApiWhileLoop_Backward(unittest.TestCase): class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): def test_nested_net_with_backward_and_lodtensor(self): def external_cond(i, j, x, mem_array): - return layers.less_than(i, array_len) + return paddle.less_than(i, array_len) def external_body(i, j, x, mem_array): def internal_cond(j, x, mem_array): - return layers.less_than(j, array_len2) + return paddle.less_than(j, array_len2) def internal_body(j, x, mem_array): inner_data = layers.array_read(array=data_array, i=j) @@ -390,7 +390,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase): class TestApiWhileLoopWithSwitchCase(unittest.TestCase): def test_with_switch_case(self): def cond(i): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body(i): def fn_add_three(): @@ -441,13 +441,13 @@ class TestApiWhileLoop_Error(unittest.TestCase): return layers.increment(i) def cond_returns_bool_tensor(i): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def cond_returns_2d_tensor(i): - return layers.less_than(i, ten_2d) + return paddle.less_than(i, ten_2d) def cond_receives_two_args(i, ten): - return layers.less_than(i, ten) + return paddle.less_than(i, ten) def body(i): return layers.increment(i) diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index cb5f1e3664..f77d9767f3 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -50,12 +50,12 @@ class TestWhileOp(unittest.TestCase): i.stop_gradient = True array_len = layers.fill_constant(shape=[1], dtype='int64', value=1) array_len.stop_gradient = True - cond = layers.less_than(x=i, y=array_len) + cond = paddle.less_than(x=i, y=array_len) j = layers.fill_constant(shape=[1], dtype='int64', value=1) j.stop_gradient = True array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3) array_len2.stop_gradient = True - cond2 = layers.less_than(x=j, y=array_len2) + cond2 = paddle.less_than(x=j, y=array_len2) while_op = layers.While(cond=cond) while_op2 = layers.While(cond=cond2) with while_op.block(): @@ -65,7 +65,7 @@ class TestWhileOp(unittest.TestCase): i = layers.increment(x=i, in_place=True) layers.array_write(result, i=i, array=mem_array) - layers.less_than(x=i, y=array_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=array_len), cond) with while_op2.block(): 
d2 = layers.array_read(array=data_array, i=j) @@ -74,7 +74,7 @@ class TestWhileOp(unittest.TestCase): j = layers.increment(x=j, in_place=True) layers.array_write(result2, i=j, array=mem_array) - layers.less_than(x=j, y=array_len2, cond=cond2) + paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) sum_result = layers.array_read(array=mem_array, i=j) loss = paddle.mean(sum_result) return loss, sum_result @@ -120,7 +120,7 @@ class TestWhileOp(unittest.TestCase): def test_exceptions(self): i = layers.zeros(shape=[2], dtype='int64') array_len = layers.fill_constant(shape=[2], dtype='int64', value=1) - cond = layers.less_than(x=i, y=array_len) + cond = paddle.less_than(x=i, y=array_len) with self.assertRaises(TypeError): layers.While(cond=cond) cond = layers.cast(cond, dtype='float64') diff --git a/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py index ca4824f554..3e126318df 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_device_guard_xpu.py @@ -162,7 +162,7 @@ class TestDeviceGuard(unittest.TestCase): while_op = fluid.layers.While(cond=cond) with while_op.block(): i = paddle.increment(x=i, value=1) - fluid.layers.less_than(x=i, y=loop_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=loop_len), cond) warning = "The Op(while) is not support to set device." warning_num = get_vaild_warning_num(warning, w) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py index e0ee57d2bf..e52e8fdceb 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_while_op_xpu.py @@ -49,12 +49,12 @@ class TestWhileOp(unittest.TestCase): i.stop_gradient = True array_len = layers.fill_constant(shape=[1], dtype='int64', value=1) array_len.stop_gradient = True - cond = layers.less_than(x=i, y=array_len) + cond = paddle.less_than(x=i, y=array_len) j = layers.fill_constant(shape=[1], dtype='int64', value=1) j.stop_gradient = True array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3) array_len2.stop_gradient = True - cond2 = layers.less_than(x=j, y=array_len2) + cond2 = paddle.less_than(x=j, y=array_len2) while_op = layers.While(cond=cond) while_op2 = layers.While(cond=cond2) with while_op.block(): @@ -64,7 +64,7 @@ class TestWhileOp(unittest.TestCase): i = layers.increment(x=i, in_place=True) layers.array_write(result, i=i, array=mem_array) - layers.less_than(x=i, y=array_len, cond=cond) + paddle.assign(paddle.less_than(x=i, y=array_len), cond) with while_op2.block(): d2 = layers.array_read(array=data_array, i=j) @@ -73,7 +73,7 @@ class TestWhileOp(unittest.TestCase): j = layers.increment(x=j, in_place=True) layers.array_write(result2, i=j, array=mem_array) - layers.less_than(x=j, y=array_len2, cond=cond2) + paddle.assign(paddle.less_than(x=j, y=array_len2), cond2) sum_result = layers.array_read(array=mem_array, i=j) loss = paddle.mean(sum_result) return loss, sum_result @@ -119,7 +119,7 @@ class TestWhileOp(unittest.TestCase): def test_exceptions(self): i = layers.zeros(shape=[2], dtype='int64') array_len = layers.fill_constant(shape=[2], dtype='int64', value=1) - cond = layers.less_than(x=i, y=array_len) + cond = paddle.less_than(x=i, y=array_len) with self.assertRaises(TypeError): layers.While(cond=cond) cond = layers.cast(cond, dtype='float64') diff --git 
a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py index 02cafb77bb..fa622b1409 100644 --- a/python/paddle/jit/dy2static/convert_operators.py +++ b/python/paddle/jit/dy2static/convert_operators.py @@ -36,7 +36,6 @@ from paddle.fluid.layers import ( from paddle.fluid.layers.control_flow import ( cond, while_loop, - less_than, increment, ) from .return_transformer import ( @@ -782,7 +781,7 @@ def _run_paddle_pop(array, *args): assert isinstance(idx, int) def cond(i, new_array): - return less_than(i, arr_len) + return paddle.less_than(i, arr_len) def body(i, new_array): item = array_read(array=array, i=i) -- GitLab
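A minimal sketch of the replacement pattern applied throughout this patch, adapted from the updated ``While`` docstring above; the counter and loop-length values are illustrative. The condition tensor is built once with ``paddle.less_than`` and then refreshed inside the loop body by assigning the new comparison into the existing ``cond`` tensor with ``paddle.assign``, instead of passing ``cond=`` to the removed ``fluid.layers.less_than``.

.. code-block:: python

    import paddle
    import paddle.fluid as fluid

    paddle.enable_static()

    i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)          # loop counter
    loop_len = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)  # loop length

    # old (removed):  cond = fluid.layers.less_than(x=i, y=loop_len)
    #                 fluid.layers.less_than(x=i, y=loop_len, cond=cond)  # in-place update
    cond = paddle.less_than(x=i, y=loop_len)
    while_op = fluid.layers.While(cond=cond)
    with while_op.block():
        i = fluid.layers.increment(x=i, value=1, in_place=True)
        # refresh the loop condition in place so the While op sees the new value
        paddle.assign(paddle.less_than(x=i, y=loop_len), cond)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res = exe.run(fluid.default_main_program(), fetch_list=[i])
    print(res)  # [array([10])]

For ``paddle.static.nn.while_loop``-style callbacks (as in test_while_loop_op.py and convert_operators.py above), no assign is needed: the removed helper is replaced one-for-one by ``paddle.less_than`` in the ``cond`` callable.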