Unverified commit 6c0755d9, authored by feifei-111, committed by GitHub

fluid API migration: Assert, increment, cond (#48885)

Parent c9f4cfad
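The three migrations are mechanical renames with small signature changes: `fluid.layers.increment(x, value, in_place=True)` becomes `paddle.increment(x, value)` (the `in_place` flag is gone), `fluid.layers.cond` becomes `paddle.static.nn.cond`, and `fluid.layers.Assert` moves to `paddle.static.nn.control_flow.Assert`. A minimal sketch of the new spellings, assuming Paddle 2.x with static graph mode enabled (not part of this diff):

```python
import paddle
# new home of Assert after this commit
from paddle.static.nn.control_flow import Assert

paddle.enable_static()

step = paddle.zeros(shape=[1], dtype='float32')
# was: fluid.layers.increment(x=step, value=1.0, in_place=True)
paddle.increment(x=step, value=1.0)

limit = paddle.full(shape=[1], fill_value=10.0, dtype='float32')
pred = paddle.less_than(step, limit)
# was: fluid.layers.cond(pred, true_fn, false_fn)
out = paddle.static.nn.cond(pred, lambda: step + 1.0, lambda: step - 1.0)

# was: layers.Assert(condition, data, summarize)
Assert(pred, [step], 5)
```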
@@ -64,7 +64,7 @@ class HybridParallelInferenceHelper:
 element_in_arr = layers.array_read(array=arr, i=step_idx)
 # write placehold data to global lod_tensor_array,
 # it need for send_v2 of lod_tensor_array
-layers.increment(x=step_idx, value=1.0, in_place=True)
+paddle.increment(x=step_idx, value=1.0)
 layers.array_write(element_in_arr, i=step_idx, array=arr)
 with paddle.fluid.device_guard(f'{device}:0'):
@@ -137,7 +137,7 @@ class HybridParallelInferenceHelper:
 with while_op.block():
 with paddle.fluid.device_guard(f'{device}:all'):
 input = layers.array_read(array=data, i=step_idx)
-layers.increment(x=step_idx, value=1.0, in_place=True)
+paddle.increment(x=step_idx, value=1.0)
 layers.array_write(input, i=step_idx, array=data)
 with paddle.fluid.device_guard(f'{device}:0'):
......
@@ -286,7 +286,7 @@ def _create_cond_block_and_update_optimizer(
 )
 new_grad.op._set_attr(OP_ROLE_KEY, op_maker.OpRole.Optimize)
-layers.cond(cond_var, true_fn=true_apply_gradient, false_fn=None)
+paddle.static.nn.cond(cond_var, true_fn=true_apply_gradient, false_fn=None)
 cond_op = main_program.global_block().ops[-1]
 cond_op._set_attr(OP_ROLE_KEY, OpRole.Optimize)
......
@@ -83,7 +83,7 @@ class AdaRoundLoss:
 return round_loss
-round_loss = fluid.layers.cond(
+round_loss = paddle.static.nn.cond(
 warm_start,
 lambda: fluid.layers.fill_constant(
 shape=[1], dtype='float32', value=0.0
......
@@ -901,7 +901,7 @@ def _dynamic_decode_imperative(
 next_sequence_lengths,
 )
-control_flow.increment(x=step_idx_tensor, value=1.0, in_place=True)
+paddle.increment(x=step_idx_tensor, value=1.0)
 step_idx += 1
 cond = paddle.logical_not(paddle.all(finished))
@@ -1060,7 +1060,8 @@ def _dynamic_decode_declarative(
 outputs,
 outputs_arrays,
 )
-control_flow.increment(x=step_idx, value=1.0, in_place=True)
+paddle.increment(x=step_idx, value=1.0)
 # update the global_finished first, since it might be also in states of
 # decoder, which otherwise would write a stale finished status to array
 tensor.assign(next_finished, global_finished)
......
@@ -7300,7 +7300,7 @@ class LookaheadOptimizer:
 dtype='int32',
 persistable=True,
 )
-layers.increment(x=step, value=1.0, in_place=True)
+paddle.increment(x=step, value=1.0)
 # lookahead
 zero_var = layers.fill_constant(
@@ -7534,7 +7534,7 @@ class GradientMergeOptimizer:
 with device_guard("cpu"):
 # step_var = (step_var + 1) % k_step
-layers.increment(x=step_var, value=1.0, in_place=True)
+paddle.increment(x=step_var, value=1.0)
 main_block.append_op(
 type='elementwise_mod',
 inputs={'X': step_var, 'Y': k_step_var},
@@ -7664,7 +7664,7 @@ class GradientMergeOptimizer:
 )
 # step3. apply gradient
-layers.cond(cond, true_fn=true_apply_gradient, false_fn=None)
+paddle.static.nn.cond(cond, true_fn=true_apply_gradient, false_fn=None)
 return self._optimize_ops
......
@@ -189,7 +189,7 @@ def get_program():
 cur_pred = mlp_while(pre_input)
 # update the loop condition
-i = fluid.layers.increment(x=i, value=1, in_place=True)
+i = paddle.increment(x=i, value=1)
 fluid.layers.array_write(cur_pred, array=input_array, i=i)
 paddle.assign(paddle.less_than(x=i, y=loop_len), cond)
......
@@ -91,7 +91,7 @@ class TestHybridParallelInferenceHelperClass(unittest.TestCase):
 with while_op.block():
 with paddle.fluid.device_guard(f'{device}:all'):
 input = layers.array_read(array=data, i=step_idx)
-layers.increment(x=step_idx, value=1.0, in_place=True)
+paddle.increment(x=step_idx, value=1.0)
 layers.array_write(input, i=step_idx, array=data)
 with paddle.fluid.device_guard(f'{device}:0'):
......
@@ -89,7 +89,7 @@ def dyfunc_with_if_else3(x):
 m = x + 2
 n = x + 3
 return q, x, y, z
-q, x, y, z = fluid.layers.cond(paddle.mean(x)[0] < 5, lambda :
+q, x, y, z = paddle.static.nn.cond(paddle.mean(x)[0] < 5, lambda :
 paddle.jit.dy2static.convert_call(true_fn_0)(q, x, y),
 lambda : paddle.jit.dy2static.convert_call(false_fn_0)(q,
 x, y))
......
@@ -97,7 +97,7 @@ class MainNetWithDict(fluid.dygraph.Layer):
 ),
 }
 # TODO(Aurelius84): The following code will be converted into:
-# max_len = layers.cond(paddle.shape(input)[0] != max_len,
+# max_len = paddle.static.nn.cond(paddle.shape(input)[0] != max_len,
 # lambda: paddle.shape(input)[0], lambda: max_len)
 # But max_len should be wrapped into tensor, which is not supported.
......
@@ -154,7 +154,7 @@ def dyfunc_ifExp_with_while(x):
 def body(i, ten, y):
 # It will be converted into `layers.cond` as followed.
-# map_func(lambda x: fluid.layers.cond(i==0, lambda: x, lambda: add_fn(x), y)
+# map_func(lambda x: paddle.static.nn.cond(i==0, lambda: x, lambda: add_fn(x), y)
 y = map_func(lambda x: x if (i == 0) is not None else add_fn(x), y)
 i += 1
 return [i, ten, y]
@@ -183,7 +183,7 @@ def dyfunc_ifExp(x):
 i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
 # It will be converted into `layers.cond` as followed.
-# map_func(lambda x: fluid.layers.cond(i==1, lambda: x, lambda: add_fn(x), y)
+# map_func(lambda x: paddle.static.nn.cond(i==1, lambda: x, lambda: add_fn(x), y)
 # `if (Tensor) == 1` is supported in dygraph.
 y = map_func(lambda x: x if i == 1 else add_fn(x), y)
 return y[0]
......
@@ -83,7 +83,7 @@ def while_loop_dyfunc_with_none(x):
 def for_loop_dyfunc(max_len):
 for i in range(max_len):
 ret = fluid.layers.zeros(shape=[1], dtype='float32')
-fluid.layers.increment(ret, value=2.0, in_place=True)
+paddle.increment(ret, value=2.0)
 return ret
@@ -104,14 +104,14 @@ def for_loop_dyfunc2(max_len):
 def for_loop_dyfunc3(max_len):
 ret = fluid.layers.zeros(shape=[1], dtype='float32')
 for i in range(1, 10, 2):
-fluid.layers.increment(ret, value=2.0, in_place=True)
+paddle.increment(ret, value=2.0)
 return ret
 def for_loop_dyfunc4(max_len):
 ret = fluid.layers.zeros(shape=[1], dtype='float32')
 for i in range(10, 1, -2):
-fluid.layers.increment(ret, value=2.0, in_place=True)
+paddle.increment(ret, value=2.0)
 return ret
@@ -119,7 +119,7 @@ def for_loop_dyfunc_not_support(max_len):
 ret = fluid.layers.zeros(shape=[1], dtype='float32')
 a = -2
 for i in range(10, 1, a):
-fluid.layers.increment(ret, value=2.0, in_place=True)
+paddle.increment(ret, value=2.0)
 return ret
......
@@ -16,7 +16,7 @@ import unittest
 import warnings
 import paddle
-from paddle.fluid.layers.control_flow import cond
+from paddle.static.nn import cond
 @paddle.jit.to_static
......
@@ -54,7 +54,7 @@ class TestQuantizationSubGraph(unittest.TestCase):
 x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
 y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
 pred = paddle.less_than(y, x)
-out = layers.cond(pred, true_func, false_func)
+out = paddle.static.nn.cond(pred, true_func, false_func)
 core_graph = core.Graph(main_program.desc)
 # We should create graph for test, otherwise it will throw a
......
@@ -121,7 +121,7 @@ class TestIncrementInplace(unittest.TestCase):
 with paddle.static.program_guard(main_prog, startup_prog):
 a = paddle.static.data(name="a", shape=[1], dtype='float32')
-b = fluid.layers.increment(a)
+b = paddle.increment(a)
 place = paddle.NPUPlace(NPUPlace)
......
@@ -43,9 +43,9 @@ class TestWhileOp(unittest.TestCase):
 init = layers.zeros(shape=[10], dtype='float32')
 mem_array = layers.array_write(x=init, i=i)
 data_array = layers.array_write(x=d0, i=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d1, i, array=data_array)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d2, i, array=data_array)
 i = layers.zeros(shape=[1], dtype='int32')
 i = layers.cast(i, 'int64')
@@ -71,7 +71,7 @@ class TestWhileOp(unittest.TestCase):
 prev = layers.array_read(array=mem_array, i=i)
 result = layers.sums(input=[d, prev])
-i = layers.increment(x=i, in_place=True)
+i = paddle.increment(x=i)
 layers.array_write(result, i=i, array=mem_array)
 paddle.assign(paddle.less_than(x=i, y=array_len), cond)
@@ -80,7 +80,7 @@ class TestWhileOp(unittest.TestCase):
 prev2 = layers.array_read(array=mem_array, i=j)
 result2 = layers.sums(input=[d2, prev2])
-j = layers.increment(x=j, in_place=True)
+j = paddle.increment(x=j)
 layers.array_write(result2, i=j, array=mem_array)
 paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
 sum_result = layers.array_read(array=mem_array, i=j)
......
@@ -57,7 +57,7 @@ class TestCompatibility(unittest.TestCase):
 x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
 y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
 pred = paddle.less_than(x, y)
-out = layers.cond(pred, true_func, false_func)
+out = paddle.static.nn.cond(pred, true_func, false_func)
 # out is a tuple containing 2 tensors
 return main_program, startup_program, out
......
@@ -30,17 +30,17 @@ def _test_read_write(x):
 i = layers.zeros(shape=[1], dtype='int64')
 i.stop_gradient = False
 arr = layers.array_write(x=x[0], i=i)
-i = layers.increment(x=i)
+i = paddle.increment(x=i)
 arr = layers.array_write(x=x[1], i=i, array=arr)
-i = layers.increment(x=i)
+i = paddle.increment(x=i)
 arr = layers.array_write(x=x[2], i=i, array=arr)
 i = layers.zeros(shape=[1], dtype='int64')
 i.stop_gradient = False
 a0 = layers.array_read(array=arr, i=i)
-i = layers.increment(x=i)
+i = paddle.increment(x=i)
 a1 = layers.array_read(array=arr, i=i)
-i = layers.increment(x=i)
+i = paddle.increment(x=i)
 a2 = layers.array_read(array=arr, i=i)
 mean_a0 = paddle.mean(a0)
......
@@ -17,6 +17,7 @@ import unittest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
+from paddle.static.nn.control_flow import Assert
 class TestAssertOp(unittest.TestCase):
@@ -33,7 +34,7 @@ class TestAssertOp(unittest.TestCase):
 condition = layers.fill_constant(
 shape=[1], dtype='bool', value=True
 )
-layers.Assert(condition, [])
+Assert(condition, [])
 self.run_network(net_func)
@@ -42,7 +43,7 @@ class TestAssertOp(unittest.TestCase):
 condition = layers.fill_constant(
 shape=[1], dtype='bool', value=False
 )
-layers.Assert(condition)
+Assert(condition)
 with self.assertRaises(ValueError):
 self.run_network(net_func)
@@ -52,7 +53,7 @@ class TestAssertOp(unittest.TestCase):
 condition = layers.fill_constant(
 shape=[1, 2], dtype='bool', value=True
 )
-layers.Assert(condition, [])
+Assert(condition, [])
 with self.assertRaises(ValueError):
 self.run_network(net_func)
@@ -62,7 +63,7 @@ class TestAssertOp(unittest.TestCase):
 zero = layers.fill_constant(shape=[1], dtype='int64', value=0)
 one = layers.fill_constant(shape=[1], dtype='int64', value=1)
 condition = paddle.less_than(one, zero) # False
-layers.Assert(condition, [zero, one])
+Assert(condition, [zero, one])
 print("test_assert_print_data")
 with self.assertRaises(ValueError):
@@ -72,7 +73,7 @@ class TestAssertOp(unittest.TestCase):
 def net_func():
 x = layers.fill_constant(shape=[10], dtype='float32', value=2.0)
 condition = paddle.max(x) < 1.0
-layers.Assert(condition, (x,), 5)
+Assert(condition, (x,), 5)
 print("test_assert_summary")
 with self.assertRaises(ValueError):
@@ -82,7 +83,7 @@ class TestAssertOp(unittest.TestCase):
 def net_func():
 x = layers.fill_constant(shape=[2, 3], dtype='float32', value=2.0)
 condition = paddle.max(x) < 1.0
-layers.Assert(condition, [x], 10, name="test")
+Assert(condition, [x], 10, name="test")
 print("test_assert_summary_greater_than_size")
 with self.assertRaises(ValueError):
......
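The test file above exercises the relocated `Assert` end to end; a condensed, hypothetical sketch of the failing-assert pattern those tests check (assuming Paddle 2.x static graph mode; the tests expect the failed assert to surface as a `ValueError`):

```python
import paddle
from paddle.static.nn.control_flow import Assert

paddle.enable_static()
main_program = paddle.static.Program()
with paddle.static.program_guard(main_program):
    x = paddle.full(shape=[10], fill_value=2.0, dtype='float32')
    condition = paddle.max(x) < 1.0  # built statically, evaluates False at run time
    Assert(condition, data=[x], summarize=5)

exe = paddle.static.Executor(paddle.CPUPlace())
try:
    exe.run(main_program)
except ValueError:
    print("assert_op fired and printed up to 5 elements of x")
```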
@@ -64,7 +64,7 @@ def convolutional_neural_network(use_py_reader):
 acc = paddle.static.accuracy(input=prediction, label=label)
 i = fluid.layers.zeros(shape=[1], dtype='int64')
 array = fluid.layers.array_write(x=prediction, i=i)
-fluid.layers.increment(i)
+paddle.increment(i)
 fluid.layers.array_write(x=acc, i=i, array=array)
 return array, img, label, prediction, avg_loss, acc, py_reader
......
@@ -54,7 +54,7 @@ class TestCondInputOutput(unittest.TestCase):
 x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
 y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
 pred = paddle.less_than(y, x)
-out = layers.cond(pred, true_func, false_func)
+out = paddle.static.nn.cond(pred, true_func, false_func)
 # out is one tensor
 place = (
@@ -94,7 +94,7 @@ class TestCondInputOutput(unittest.TestCase):
 startup_program = Program()
 with program_guard(main_program, startup_program):
 pred = layers.fill_constant(shape=[1], dtype='bool', value=True)
-out = layers.cond(pred, true_func, false_func)
+out = paddle.static.nn.cond(pred, true_func, false_func)
 # out is a tuple containing 2 tensors
 place = (
@@ -138,7 +138,7 @@ class TestCondInputOutput(unittest.TestCase):
 a = layers.fill_constant(shape=[3, 2, 1], dtype='int32', value=7)
 i = fluid.data(name="i", shape=[1], dtype='int32')
 pred = (i % 2) == 0
-a = layers.cond(
+a = paddle.static.nn.cond(
 pred, lambda: true_func(a, i), lambda: false_func(a, i)
 )
 place = (
@@ -183,9 +183,9 @@ class TestCondInputOutput(unittest.TestCase):
 with program_guard(main_program, startup_program):
 i = fluid.data(name="i", shape=[1], dtype='int32')
 pred = (i % 2) == 0
-out1 = layers.cond(pred, true_func, false_func)
-out2 = layers.cond(pred, None, false_func)
-out3 = layers.cond(pred, true_func, None)
+out1 = paddle.static.nn.cond(pred, true_func, false_func)
+out2 = paddle.static.nn.cond(pred, None, false_func)
+out3 = paddle.static.nn.cond(pred, true_func, None)
 place = (
 fluid.CUDAPlace(0)
 if core.is_compiled_with_cuda()
@@ -223,13 +223,15 @@ class TestCondInputOutput(unittest.TestCase):
 i = fluid.data(name="i", shape=[1], dtype='int32')
 pred = (i % 2) == 0
 with self.assertRaises(TypeError):
-out = layers.cond(pred, i, func_return_one_tensor)
+out = paddle.static.nn.cond(pred, i, func_return_one_tensor)
 with self.assertRaises(TypeError):
-out = layers.cond(pred, func_return_one_tensor, np.asarray([3]))
+out = paddle.static.nn.cond(
+    pred, func_return_one_tensor, np.asarray([3])
+)
 with self.assertRaises(Exception) as e:
-out = layers.cond(
+out = paddle.static.nn.cond(
 pred, func_return_none, func_return_one_tensor
 )
 self.assertTrue(
@@ -238,7 +240,7 @@ class TestCondInputOutput(unittest.TestCase):
 )
 with self.assertRaises(Exception) as e:
-out = layers.cond(
+out = paddle.static.nn.cond(
 pred, func_return_two_tensors, func_return_none
 )
 self.assertTrue(
@@ -247,7 +249,7 @@ class TestCondInputOutput(unittest.TestCase):
 )
 with self.assertRaises(Exception) as e:
-out = layers.cond(
+out = paddle.static.nn.cond(
 pred, func_return_one_tensor, func_return_two_tensors
 )
 self.assertTrue(
@@ -268,7 +270,7 @@ class TestCondInputOutput(unittest.TestCase):
 shape=[1], dtype='float32', value=1.25
 )
 b.stop_gradient = False
-out = layers.cond(a - b < -1.0, lambda: a, lambda: b)
+out = paddle.static.nn.cond(a - b < -1.0, lambda: a, lambda: b)
 append_backward(out)
 place = (
@@ -308,14 +310,14 @@ class TestCondNestedControlFlow(unittest.TestCase):
 paddle.enable_static()
 def less_than_branch(i, a):
-return layers.cond(
+return paddle.static.nn.cond(
 i >= 3.0,
 lambda: paddle.add(a, a),
 lambda: paddle.subtract(a, a),
 )
 def greater_equal_branch(i, a):
-return layers.cond(
+return paddle.static.nn.cond(
 i < 8.0,
 lambda: paddle.multiply(a, a),
 lambda: paddle.divide(a, a),
@@ -326,7 +328,7 @@ class TestCondNestedControlFlow(unittest.TestCase):
 with program_guard(main_program, startup_program):
 i = fluid.data(name="i", shape=[1], dtype='float32')
 a = 2.0 * i
-out = layers.cond(
+out = paddle.static.nn.cond(
 i < 5.0,
 lambda: less_than_branch(i, a),
 lambda: greater_equal_branch(i, a),
@@ -370,14 +372,14 @@ class TestCondNestedControlFlow(unittest.TestCase):
 shape=[1], dtype='float32', value=1.24
 )
 b.stop_gradient = False
-out = fluid.layers.cond(
+out = paddle.static.nn.cond(
 a < b,
-lambda: fluid.layers.cond(
+lambda: paddle.static.nn.cond(
 a - b < -1.0,
 lambda: paddle.add(a, b),
 lambda: paddle.multiply(a, b),
 ),
-lambda: fluid.layers.cond(
+lambda: paddle.static.nn.cond(
 a == b,
 lambda: paddle.subtract(a, b),
 lambda: paddle.pow(a, b),
@@ -550,7 +552,7 @@ class TestCondBackward(unittest.TestCase):
 def cond_func(i, img, label):
 predicate = (i % 2) == 0
-return layers.cond(
+return paddle.static.nn.cond(
 predicate,
 lambda: simple_fc_net_with_inputs(img, label, class_num=10),
 lambda: batchnorm_fc_with_inputs(img, label, class_num=10),
@@ -574,19 +576,19 @@ class TestCondBackward(unittest.TestCase):
 paddle.enable_static()
 def branch(i, img, label):
-return layers.cond(
+return paddle.static.nn.cond(
 (i % 2) == 0,
 lambda: simple_fc_net_with_inputs(img, label, class_num=10),
 lambda: batchnorm_fc_with_inputs(img, label, class_num=10),
 )
 def cond_func_simple_net_at_true(i, img, label):
-return layers.cond(
+return paddle.static.nn.cond(
 i < 5, lambda: branch(i, img, label), lambda: paddle.mean(img)
 )
 def cond_func_simple_net_at_false(i, img, label):
-return layers.cond(
+return paddle.static.nn.cond(
 i < 5, lambda: paddle.mean(img), lambda: branch(i, img, label)
 )
@@ -626,14 +628,14 @@ class TestCondBackward(unittest.TestCase):
 predicate = (i % 2) == 0
 else:
 predicate = (i % 2) != 0
-return layers.cond(
+return paddle.static.nn.cond(
 predicate,
 lambda: simple_fc_net_with_inputs(img, label, class_num=10),
 lambda: batchnorm_fc_with_inputs(img, label, class_num=10),
 )
 def cond_func(i, img, label):
-return layers.cond(
+return paddle.static.nn.cond(
 i < 5,
 lambda: branch(i, img, label, True),
 lambda: branch(i, img, label, False),
@@ -665,16 +667,16 @@ class TestCondWithError(unittest.TestCase):
 return pred
 with self.assertRaises(TypeError):
-layers.cond(None, func, func)
+paddle.static.nn.cond(None, func, func)
 with self.assertRaises(TypeError):
-layers.cond(pred, func, set())
+paddle.static.nn.cond(pred, func, set())
 with self.assertRaises(TypeError):
-layers.cond(pred, set(), func)
+paddle.static.nn.cond(pred, set(), func)
 with self.assertRaises(TypeError):
-layers.cond(pred, func, func, set())
+paddle.static.nn.cond(pred, func, func, set())
 class TestCondWithDict(unittest.TestCase):
......
@@ -228,7 +228,7 @@ class TestCloneWithStopGradientInSubBlock(unittest.TestCase):
 hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6)
 return hidden2
-hidden2 = fluid.layers.cond(cond, true_fn, false_fn)
+hidden2 = paddle.static.nn.cond(cond, true_fn, false_fn)
 loss = paddle.nn.functional.cross_entropy(
 input=fluid.layers.fc(hidden2, size=10, act='softmax'),
@@ -271,7 +271,7 @@ class TestCloneWithRaise(unittest.TestCase):
 hidden2 = fluid.layers.dropout(hidden1, dropout_prob=0.6)
 return hidden2
-hidden2 = fluid.layers.cond(cond, true_fn, false_fn)
+hidden2 = paddle.static.nn.cond(cond, true_fn, false_fn)
 loss = paddle.nn.functional.cross_entropy(
 input=fluid.layers.fc(hidden2, size=10, act='softmax'),
 label=fluid.layers.data(name='label', shape=[1], dtype='int64'),
......
@@ -53,7 +53,7 @@ def build_and_run_program(place, batch_size, beam_size, stop_gradient=False):
 topk_coordinates = paddle.stack([batch_pos, indices], axis=2)
 topk_coordinates.stop_gradient = stop_gradient
 score = paddle.gather_nd(x, topk_coordinates)
-layers.increment(x=step_idx, value=1.0, in_place=True)
+paddle.increment(x=step_idx, value=1.0)
 layers.array_write(score, i=step_idx, array=scores)
 length_cond = paddle.less_than(x=step_idx, y=max_len)
 layers.assign(length_cond, cond)
......
@@ -83,10 +83,10 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
 mem_array = layers.array_write(x=init, i=i)
 data_array = layers.array_write(x=d0, i=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d1, i, array=data_array)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d2, i, array=data_array)
 i = layers.zeros(shape=[1], dtype='int64')
@@ -112,7 +112,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
 prev = paddle.reshape(prev, shape=[10])
 result = layers.sums(input=[d, prev])
-i = layers.increment(x=i, in_place=True)
+i = paddle.increment(x=i)
 layers.array_write(result, i=i, array=mem_array)
 paddle.assign(paddle.less_than(x=i, y=array_len), cond)
 with while_op2.block():
@@ -122,7 +122,7 @@ class TestEagerDeletionWhileOpBase(unittest.TestCase):
 prev2 = paddle.reshape(prev2, shape=[10])
 result2 = layers.sums(input=[d2, prev2])
-j = layers.increment(x=j, in_place=True)
+j = paddle.increment(x=j)
 layers.array_write(result2, i=j, array=mem_array)
 paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
......
@@ -16,8 +16,9 @@ import unittest
 import numpy as np
+import paddle
 from paddle.fluid.executor import Executor
-from paddle.fluid.layers import array_write, data, increment, mul, zeros
+from paddle.fluid.layers import array_write, data, mul, zeros
 class TestExecutor(unittest.TestCase):
@@ -26,13 +27,13 @@ class TestExecutor(unittest.TestCase):
 a = data(name='a', shape=[784], dtype='float32')
 array = array_write(x=a, i=i)
-i = increment(i)
+i = paddle.increment(i)
 b = data(
 name='b', shape=[784, 100], dtype='float32', append_batch_size=False
 )
 array_write(x=b, i=i, array=array)
-i = increment(i)
+i = paddle.increment(i)
 out = mul(x=a, y=b)
 array_write(x=out, i=i, array=array)
......
@@ -18,6 +18,7 @@ import unittest
 import numpy as np
 from simple_nets import simple_fc_net, simple_fc_net_with_inputs
+import paddle
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
@@ -35,9 +36,9 @@ class TestFetchLoDTensorArray(unittest.TestCase):
 opt.minimize(loss)
 array = layers.array_write(x=img, i=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(x=label, i=i, array=array)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(x=loss, i=i, array=array)
 return loss, array
......
@@ -1579,7 +1579,7 @@ class TestLayer(LayerTest):
 b = fluid.layers.fill_constant(
 shape=[1], dtype='float32', value=0.23
 )
-out = fluid.layers.cond(
+out = paddle.static.nn.cond(
 a >= b,
 lambda: greater_equal_branch(a, b),
 lambda: less_than_branch(a, b),
@@ -1599,12 +1599,12 @@ class TestLayer(LayerTest):
 b = fluid.dygraph.to_variable(
 np.array([0.23]).astype('float32')
 )
-out = layers.cond(
+out = paddle.static.nn.cond(
 a < b,
 lambda: less_than_branch(a, b),
 lambda: greater_equal_branch(a, b),
 )
-out2 = layers.cond(
+out2 = paddle.static.nn.cond(
 a >= b,
 lambda: greater_equal_branch(a, b),
 lambda: less_than_branch(a, b),
@@ -1615,18 +1615,18 @@ class TestLayer(LayerTest):
 eager_dynamic_res, eager_dynamic_res2
 )
 with self.assertRaises(TypeError):
-layers.cond(a < b, 'str', 'str')
+paddle.static.nn.cond(a < b, 'str', 'str')
 with self.assertRaises(TypeError):
-layers.cond(a >= b, 'str', 'str')
+paddle.static.nn.cond(a >= b, 'str', 'str')
 a = fluid.dygraph.to_variable(np.array([0.1]).astype('float32'))
 b = fluid.dygraph.to_variable(np.array([0.23]).astype('float32'))
-out = layers.cond(
+out = paddle.static.nn.cond(
 a < b,
 lambda: less_than_branch(a, b),
 lambda: greater_equal_branch(a, b),
 )
-out2 = layers.cond(
+out2 = paddle.static.nn.cond(
 a >= b,
 lambda: greater_equal_branch(a, b),
 lambda: less_than_branch(a, b),
@@ -1635,9 +1635,9 @@ class TestLayer(LayerTest):
 dynamic_res2 = out2.numpy()
 np.testing.assert_array_equal(dynamic_res, dynamic_res2)
 with self.assertRaises(TypeError):
-layers.cond(a < b, 'str', 'str')
+paddle.static.nn.cond(a < b, 'str', 'str')
 with self.assertRaises(TypeError):
-layers.cond(a >= b, 'str', 'str')
+paddle.static.nn.cond(a >= b, 'str', 'str')
 np.testing.assert_array_equal(static_res, dynamic_res)
 np.testing.assert_array_equal(static_res, eager_dynamic_res)
......
@@ -237,7 +237,7 @@ class TestMathOpPatches(unittest.TestCase):
 one = paddle.ones(shape=[1], dtype='int32')
 zero = fluid.layers.zeros(shape=[1], dtype='int32')
 cond = one == zero
-c = fluid.layers.cond(cond, lambda: a + b, lambda: a - b)
+c = paddle.static.nn.cond(cond, lambda: a + b, lambda: a - b)
 place = fluid.CPUPlace()
 exe = fluid.Executor(place)
......
@@ -115,7 +115,7 @@ class SimpleNetWithCond:
 return cond_res
 cond_i = fluid.layers.assign(np.array([cond_i], dtype='float32'))
-sum_cond = fluid.layers.cond(cond_i > 1.0, cond_true, cond_false)
+sum_cond = paddle.static.nn.cond(cond_i > 1.0, cond_true, cond_false)
 sum_all = paddle.add_n([sum_xy, sub_yz, sum_cond])
 mean_out = paddle.mean(sum_all)
 if use_bf16:
......
@@ -50,7 +50,7 @@ class TestProfiler(unittest.TestCase):
 with while_op.block():
 hidden_n = fluid.layers.fc(input=hidden1, size=64, act='relu')
 layers.array_write(hidden_n, i, data_arr)
-fluid.layers.increment(x=counter, value=1, in_place=True)
+paddle.increment(x=counter, value=1)
 paddle.assign(paddle.less_than(x=counter, y=until), cond)
 hidden_n = layers.array_read(data_arr, i)
......
@@ -46,7 +46,7 @@ class TestProgramToReadableCode(unittest.TestCase):
 x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
 y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
 pred = paddle.less_than(y, x)
-out = layers.cond(pred, true_func, false_func)
+out = paddle.static.nn.cond(pred, true_func, false_func)
 def test_program_code(self):
 self.var._to_readable_code()
......
@@ -59,7 +59,7 @@ class TestApiWhileLoop(unittest.TestCase):
 def body(i, mem):
 mem = paddle.add(x=mem, y=one)
-i = layers.increment(i)
+i = paddle.increment(i)
 return [i, mem]
 main_program = Program()
@@ -100,7 +100,7 @@ class TestApiWhileLoop(unittest.TestCase):
 test_list_dict[0]["test_key"]
 )
-i = layers.increment(i)
+i = paddle.increment(i)
 return [i, ten, test_dict, test_list, test_list_dict]
 main_program = Program()
@@ -174,7 +174,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
 def internal_body(j, init, sums):
 init = paddle.add(x=init, y=ones)
 sums = paddle.add(x=init, y=sums)
-j = layers.increment(j)
+j = paddle.increment(j)
 return [j, init, sums]
 result = paddle.static.nn.while_loop(
@@ -184,7 +184,7 @@ class TestApiWhileLoop_Nested(unittest.TestCase):
 init = result[1]
 sums = result[2]
 sums = paddle.add(x=init, y=sums)
-i = layers.increment(i)
+i = paddle.increment(i)
 return [i, j, init, sums]
 main_program = Program()
@@ -229,7 +229,7 @@ class TestApiWhileLoop_Backward(unittest.TestCase):
 def body(i, x):
 x = paddle.multiply(x=i, y=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 return [i, x]
 main_program = Program()
@@ -324,7 +324,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
 inner_prev = layers.array_read(array=mem_array, i=j)
 inner_sum_0 = paddle.add(x=inner_data, y=inner_prev)
 inner_sum_1 = paddle.add(x=x, y=inner_sum_0)
-j = layers.increment(x=j, in_place=True)
+j = paddle.increment(x=j)
 layers.array_write(inner_sum_1, i=j, array=mem_array)
 return [j, x, mem_array]
@@ -332,7 +332,7 @@ class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
 outer_prev = layers.array_read(array=mem_array, i=i)
 outer_sum_0 = paddle.add(x=outer_data, y=outer_prev)
 outer_sum_1 = paddle.add(x=x, y=outer_sum_0)
-i = layers.increment(x=i, in_place=True)
+i = paddle.increment(x=i)
 layers.array_write(outer_sum_1, i=i, array=mem_array)
 j, x, mem_array = paddle.static.nn.while_loop(
 internal_cond, internal_body, [j, x, mem_array]
@@ -352,9 +352,9 @@
 init = layers.zeros(shape=[10], dtype='float32')
 mem_array = layers.array_write(x=init, i=i)
 data_array = layers.array_write(x=d0, i=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d1, i, array=data_array)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d2, i, array=data_array)
 i = layers.zeros(shape=[1], dtype='int64')
 i.stop_gradient = True
@@ -444,7 +444,7 @@ class TestApiWhileLoop_Error(unittest.TestCase):
 return 1
 def cond_returns_not_bool_tensor(i):
-return layers.increment(i)
+return paddle.increment(i)
 def cond_returns_bool_tensor(i):
 return paddle.less_than(i, ten)
@@ -456,14 +456,14 @@
 return paddle.less_than(i, ten)
 def body(i):
-return layers.increment(i)
+return paddle.increment(i)
 def body_returns_error_length(i):
-i = layers.increment(i)
+i = paddle.increment(i)
 return [i, i]
 def body_returns_error_type(i, ten):
-return layers.increment(i)
+return paddle.increment(i)
 def cond_returns_with_mutable_dict(i, test_dict):
 return i > 0
@@ -472,7 +472,7 @@
 test_dict['new_key'] = layers.fill_constant(
 shape=[1], dtype='int64', value=1
 )
-return layers.increment(i), test_dict
+return paddle.increment(i), test_dict
 def cond_returns_with_mutable_list(i, test_list):
 return i > 0
@@ -481,7 +481,7 @@
 test_list.append(
 layers.fill_constant(shape=[1], dtype='int64', value=1)
 )
-return layers.increment(i), test_list
+return paddle.increment(i), test_list
 main_program = Program()
 startup_program = Program()
......
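For context, the loop-body pattern these `while_loop` tests migrate looks roughly like this with the new API (a sketch, assuming Paddle 2.x static mode; `paddle.increment` appears to both update its argument and return it, which is why the fluid `in_place=True` flag could simply be dropped):

```python
import paddle

paddle.enable_static()

def cond(i, ten):
    return paddle.less_than(i, ten)

def body(i, ten):
    # was: i = layers.increment(i); the new call returns the updated tensor
    i = paddle.increment(i)
    return [i, ten]

i = paddle.zeros(shape=[1], dtype='int64')
ten = paddle.full(shape=[1], fill_value=10, dtype='int64')
i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
```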
@@ -42,9 +42,9 @@ class TestWhileOp(unittest.TestCase):
 init = layers.zeros(shape=[10], dtype='float32')
 mem_array = layers.array_write(x=init, i=i)
 data_array = layers.array_write(x=d0, i=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d1, i, array=data_array)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d2, i, array=data_array)
 i = layers.zeros(shape=[1], dtype='int64')
 i.stop_gradient = True
@@ -63,7 +63,7 @@
 prev = layers.array_read(array=mem_array, i=i)
 result = layers.sums(input=[d, prev])
-i = layers.increment(x=i, in_place=True)
+i = paddle.increment(x=i)
 layers.array_write(result, i=i, array=mem_array)
 paddle.assign(paddle.less_than(x=i, y=array_len), cond)
@@ -72,7 +72,7 @@
 prev2 = layers.array_read(array=mem_array, i=j)
 result2 = layers.sums(input=[d2, prev2])
-j = layers.increment(x=j, in_place=True)
+j = paddle.increment(x=j)
 layers.array_write(result2, i=j, array=mem_array)
 paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
 sum_result = layers.array_read(array=mem_array, i=j)
@@ -134,7 +134,7 @@ class BadInputTest(unittest.TestCase):
 def test_bad_x():
 x = [1, 2, 3]
-fluid.layers.increment(x)
+paddle.increment(x)
 self.assertRaises(TypeError, test_bad_x)
......
@@ -41,9 +41,9 @@ class TestWhileOp(unittest.TestCase):
 init = layers.zeros(shape=[10], dtype='float32')
 mem_array = layers.array_write(x=init, i=i)
 data_array = layers.array_write(x=d0, i=i)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d1, i, array=data_array)
-i = layers.increment(i)
+i = paddle.increment(i)
 layers.array_write(d2, i, array=data_array)
 i = layers.zeros(shape=[1], dtype='int64')
 i.stop_gradient = True
@@ -62,7 +62,7 @@
 prev = layers.array_read(array=mem_array, i=i)
 result = layers.sums(input=[d, prev])
-i = layers.increment(x=i, in_place=True)
+i = paddle.increment(x=i)
 layers.array_write(result, i=i, array=mem_array)
 paddle.assign(paddle.less_than(x=i, y=array_len), cond)
@@ -71,7 +71,7 @@
 prev2 = layers.array_read(array=mem_array, i=j)
 result2 = layers.sums(input=[d2, prev2])
-j = layers.increment(x=j, in_place=True)
+j = paddle.increment(x=j)
 layers.array_write(result2, i=j, array=mem_array)
 paddle.assign(paddle.less_than(x=j, y=array_len2), cond2)
 sum_result = layers.array_read(array=mem_array, i=j)
......
@@ -341,7 +341,7 @@ def get_value_for_bool_tensor(var, item):
 var_shape[0] = 0
 return paddle.empty(var_shape, dtype=var.dtype)
-from .layers.control_flow import cond
+from paddle.static.nn import cond
 return cond(
 item.any(), lambda: idx_not_empty(var, item), lambda: idx_empty(var)
@@ -874,7 +874,7 @@ def set_value_for_bool_tensor(var, item, value):
 out = scatter_nd_add(var, idx, gather_val_new)
 var[:] = out
-from .layers.control_flow import cond
+from paddle.static.nn import cond
 # If all the bool index is False, just do nothing
 cond(item.any(), lambda: idx_not_empty(var, item, value))
......
@@ -19,7 +19,7 @@ from .variable_trans_func import (
 to_static_variable,
 )
 from paddle.fluid.framework import core, Variable
-from paddle.fluid.layers import Assert, Print
+from paddle.fluid.layers import Print
 from paddle.fluid.layers import (
 array_read,
 array_write,
@@ -33,9 +33,7 @@ from paddle.fluid.layers import (
 control_flow,
 )
 from paddle.fluid.layers.control_flow import (
-cond,
 while_loop,
-increment,
 )
 from .return_transformer import (
 RETURN_NO_VALUE_VAR_NAME,
@@ -395,7 +393,7 @@ def _run_paddle_cond(
 return ret
 try:
-cond_outs = control_flow.cond(
+cond_outs = paddle.static.nn.cond(
 pred, new_true_fn, new_false_fn, None, return_name_ids
 )
 except Exception as e:
@@ -734,6 +732,8 @@ def convert_assert(cond, message=""):
 if isinstance(cond, Variable):
 cond = cast(cond, "bool")
 # NOTE: message is not used because Paddle Assert has no corresponding parameter to use.
+from paddle.static.nn.control_flow import Assert
 return Assert(cond)
 else:
 assert cond, message
@@ -786,7 +786,8 @@ def _run_paddle_pop(array, *args):
 def body(i, new_array):
 item = array_read(array=array, i=i)
 array_write(item, paddle.tensor.array_length(new_array), new_array)
-i = increment(i)
+i = paddle.increment(i)
 return i, new_array
 arr_len = paddle.tensor.array_length(array)
@@ -816,7 +817,9 @@ def _slice_tensor_array(array, start, end):
 new_array = paddle.slice(array, starts=[start], ends=[end], axes=[0])
 return new_array
-new_array = cond(start == end, true_fn, lambda: false_fn(array, start, end))
+new_array = paddle.static.nn.cond(
+    start == end, true_fn, lambda: false_fn(array, start, end)
+)
 return new_array
......
@@ -29,7 +29,6 @@ from .control_flow import (
 from .common import bilinear_tensor_product # noqa: F401
 from .common import py_func # noqa: F401
 from ...tensor.creation import create_parameter # noqa: F401
-from ...fluid.layers import cond # noqa: F401
 from ...fluid.layers import conv2d # noqa: F401
 from ...fluid.layers import crf_decoding # noqa: F401
 from ...fluid.layers import layer_norm # noqa: F401
@@ -59,6 +58,8 @@ from ...fluid.layers.sequence_lod import sequence_scatter # noqa: F401
 from ...fluid.layers.sequence_lod import sequence_enumerate # noqa: F401
 from ...fluid.layers.sequence_lod import sequence_reverse # noqa: F401
+from .control_flow import cond
 __all__ = [ # noqa
 'fc',
 'batch_norm',
......
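The last hunk is the other half of the move: `paddle.static.nn` now re-exports `cond` from its own `control_flow` module instead of from `fluid.layers`, so the import used throughout this commit resolves there. A small usage sketch, assuming Paddle 2.x:

```python
import paddle
from paddle.static.nn import cond  # the public path this commit migrates callers to

paddle.enable_static()
a = paddle.full(shape=[1], fill_value=0.1, dtype='float32')
b = paddle.full(shape=[1], fill_value=0.23, dtype='float32')
# pick a branch at run time; both branches must return matching structures
out = cond(paddle.less_than(a, b),
           lambda: paddle.add(a, b),
           lambda: paddle.subtract(a, b))
```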