Unverified · Commit 24bea401, authored by qingqing01, committed by GitHub

Hide some LoDTensor-related ops' Python wrappers. (#12230)

* Hide some LoDTensor-related ops' Python wrappers.
Parent 6169d724
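The change is mechanical for downstream code: the LoDTensor plumbing ops are removed from the public paddle.fluid.layers namespace but remain importable from paddle.fluid.layers.control_flow, which is exactly how the tests below are updated. A minimal migration sketch (variable names are illustrative; the imports and call signatures are taken from the diff):

import paddle.fluid as fluid
from paddle.fluid.layers.control_flow import (
    lod_rank_table, lod_tensor_to_array, array_to_lod_tensor)

x = fluid.layers.data(name='x', shape=[10], lod_level=1)
table = lod_rank_table(x, level=0)           # was: fluid.layers.lod_rank_table(...)
array = lod_tensor_to_array(x, table)        # was: fluid.layers.lod_tensor_to_array(...)
result = array_to_lod_tensor(array, table)   # was: fluid.layers.array_to_lod_tensor(...)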
@@ -200,31 +200,23 @@ paddle.fluid.layers.argsort ArgSpec(args=['input', 'axis', 'name'], varargs=None
 paddle.fluid.layers.ones ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,))
 paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.split_lod_tensor ArgSpec(args=['input', 'mask', 'level'], varargs=None, keywords=None, defaults=(0,))
-paddle.fluid.layers.merge_lod_tensor ArgSpec(args=['in_true', 'in_false', 'x', 'mask', 'level'], varargs=None, keywords=None, defaults=(0,))
 paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.While.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.While.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.lod_rank_table ArgSpec(args=['x', 'level'], varargs=None, keywords=None, defaults=(0,))
-paddle.fluid.layers.max_sequence_len ArgSpec(args=['rank_table'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.lod_tensor_to_array ArgSpec(args=['x', 'table'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.array_to_lod_tensor ArgSpec(args=['x', 'table'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.increment ArgSpec(args=['x', 'value', 'in_place'], varargs=None, keywords=None, defaults=(1.0, True))
 paddle.fluid.layers.array_write ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.create_array ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.less_than ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None))
 paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords='ignored', defaults=(None,))
 paddle.fluid.layers.array_read ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.shrink_memory ArgSpec(args=['x', 'i', 'table'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.array_length ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.IfElse.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.IfElse.false_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None)
-paddle.fluid.layers.IfElse.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.DynamicRNN.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None)
@@ -233,9 +225,6 @@ paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs',
 paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ConditionalBlock.__init__ ArgSpec(args=['self', 'inputs', 'is_scalar_condition', 'name'], varargs=None, keywords=None, defaults=(False, None))
-paddle.fluid.layers.ConditionalBlock.block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
-paddle.fluid.layers.ConditionalBlock.complete ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.StaticRNN.complete_op ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1))
......
@@ -23,25 +23,17 @@ from ops import logical_and, logical_not, logical_or
 import numpy

 __all__ = [
-    'split_lod_tensor',
-    'merge_lod_tensor',
     'While',
     'Switch',
-    'lod_rank_table',
-    'max_sequence_len',
-    'lod_tensor_to_array',
-    'array_to_lod_tensor',
     'increment',
     'array_write',
     'create_array',
     'less_than',
     'equal',
     'array_read',
-    'shrink_memory',
     'array_length',
     'IfElse',
     'DynamicRNN',
-    'ConditionalBlock',
     'StaticRNN',
     'reorder_lod_tensor_by_rank',
     'ParallelDo',
@@ -1457,7 +1449,7 @@ class IfElse(object):
         if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
             raise ValueError("input must in true/false blocks")
         if id(x) not in self.input_table:
-            parent_block = self.parent_block()
+            parent_block = self._parent_block()
             out_true = parent_block.create_var(
                 name=unique_name.generate('ifelse_input' + self.helper.name),
                 dtype=x.dtype)
@@ -1483,7 +1475,7 @@ class IfElse(object):
         else:
             return out_false

-    def parent_block(self):
+    def _parent_block(self):
         current_block = self.helper.main_program.current_block()
         return self.helper.main_program.block(current_block.parent_idx)
@@ -1499,7 +1491,7 @@ class IfElse(object):
         out_table = self.output_table[1 if self.status ==
                                       self.IN_IF_ELSE_TRUE_BLOCKS else 0]
-        parent_block = self.parent_block()
+        parent_block = self._parent_block()
         for each_out in outs:
             if not isinstance(each_out, Variable):
                 raise TypeError("Each output should be a variable")
......
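The three hunks above also rename IfElse.parent_block to IfElse._parent_block, making it private (and dropping it from API.spec above). The public IfElse pattern is unchanged; a minimal sketch, assuming the Fluid 0.x API and illustrative names:

import paddle.fluid as fluid

label = fluid.layers.data(name='label', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], dtype='int64', value=5)
cond = fluid.layers.less_than(x=label, y=limit)
ie = fluid.layers.IfElse(cond)
with ie.true_block():
    ie.output(ie.input(label))   # branch I/O goes through input()/output(); no parent_block needed
with ie.false_block():
    ie.output(ie.input(label))
merged = ie()                    # list of merged output variables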
@@ -19,6 +19,10 @@ from paddle.fluid.executor import Executor
 from paddle.fluid.optimizer import MomentumOptimizer
 import paddle.fluid.core as core
 import paddle.fluid as fluid
+from paddle.fluid.layers.control_flow import split_lod_tensor
+from paddle.fluid.layers.control_flow import merge_lod_tensor
+from paddle.fluid.layers.control_flow import ConditionalBlock
 import unittest
 import numpy as np
@@ -34,11 +38,10 @@ class TestMNISTIfElseOp(unittest.TestCase):
         limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
         cond = layers.less_than(x=label, y=limit)
-        true_image, false_image = layers.split_lod_tensor(
-            input=image, mask=cond)
+        true_image, false_image = split_lod_tensor(input=image, mask=cond)

         true_out = layers.create_tensor(dtype='float32')
-        true_cond = layers.ConditionalBlock([cond])
+        true_cond = ConditionalBlock([cond])

         with true_cond.block():
             hidden = layers.fc(input=true_image, size=100, act='tanh')
@@ -46,14 +49,14 @@ class TestMNISTIfElseOp(unittest.TestCase):
             layers.assign(input=prob, output=true_out)

         false_out = layers.create_tensor(dtype='float32')
-        false_cond = layers.ConditionalBlock([cond])
+        false_cond = ConditionalBlock([cond])

         with false_cond.block():
             hidden = layers.fc(input=false_image, size=200, act='tanh')
             prob = layers.fc(input=hidden, size=10, act='softmax')
             layers.assign(input=prob, output=false_out)

-        prob = layers.merge_lod_tensor(
+        prob = merge_lod_tensor(
             in_true=true_out, in_false=false_out, mask=cond, x=image)
         loss = layers.cross_entropy(input=prob, label=label)
         avg_loss = layers.mean(loss)
......
@@ -18,14 +18,15 @@ import paddle.fluid.core as core
 from paddle.fluid.framework import default_startup_program, default_main_program
 from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
+from paddle.fluid.layers.control_flow import ConditionalBlock
 import numpy


-class ConditionalBlock(unittest.TestCase):
+class ConditionalBlockTest(unittest.TestCase):
     def test_forward(self):
         data = layers.data(name='X', shape=[1], dtype='float32')
         data.stop_gradient = False
-        cond = layers.ConditionalBlock(inputs=[data])
+        cond = ConditionalBlock(inputs=[data])
         out = layers.create_tensor(dtype='float32')
         with cond.block():
             hidden = layers.fc(input=data, size=10)
......
@@ -16,7 +16,7 @@ import unittest
 import paddle.fluid.framework as framework


-class ConditionalBlock(unittest.TestCase):
+class ConstantTest(unittest.TestCase):
     def test_const_value(self):
         self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD")
         self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@")
......
@@ -17,6 +17,12 @@ import paddle
 import unittest
 import numpy
+from paddle.fluid.layers.control_flow import lod_rank_table
+from paddle.fluid.layers.control_flow import max_sequence_len
+from paddle.fluid.layers.control_flow import lod_tensor_to_array
+from paddle.fluid.layers.control_flow import array_to_lod_tensor
+from paddle.fluid.layers.control_flow import shrink_memory


 class TestDynRNN(unittest.TestCase):
     def setUp(self):
@@ -38,12 +44,11 @@ class TestDynRNN(unittest.TestCase):
             label = fluid.layers.data(name='label', shape=[1], dtype='float32')

-            rank_table = fluid.layers.lod_rank_table(x=sent_emb)
+            rank_table = lod_rank_table(x=sent_emb)

-            sent_emb_array = fluid.layers.lod_tensor_to_array(
-                x=sent_emb, table=rank_table)
+            sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table)

-            seq_len = fluid.layers.max_sequence_len(rank_table=rank_table)
+            seq_len = max_sequence_len(rank_table=rank_table)
             i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
             i.stop_gradient = False
@@ -66,7 +71,7 @@ class TestDynRNN(unittest.TestCase):
                 mem = fluid.layers.array_read(array=mem_array, i=i)
                 ipt = fluid.layers.array_read(array=sent_emb_array, i=i)
-                mem = fluid.layers.shrink_memory(x=mem, i=i, table=rank_table)
+                mem = shrink_memory(x=mem, i=i, table=rank_table)
                 hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh')
@@ -75,8 +80,7 @@ class TestDynRNN(unittest.TestCase):
                 fluid.layers.array_write(x=hidden, i=i, array=mem_array)
                 fluid.layers.less_than(x=i, y=seq_len, cond=cond)

-            all_timesteps = fluid.layers.array_to_lod_tensor(
-                x=out, table=rank_table)
+            all_timesteps = array_to_lod_tensor(x=out, table=rank_table)
             last = fluid.layers.sequence_last_step(input=all_timesteps)
             logits = fluid.layers.fc(input=last, size=1, act=None)
             loss = fluid.layers.sigmoid_cross_entropy_with_logits(
......
@@ -12,7 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle.fluid.layers import lod_rank_table, data
+from paddle.fluid.layers import data
+from paddle.fluid.layers.control_flow import lod_rank_table
 from paddle.fluid.executor import Executor
 import paddle.fluid.core as core
 import numpy
......
@@ -20,6 +20,11 @@ from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
+from paddle.fluid.layers.control_flow import lod_rank_table
+from paddle.fluid.layers.control_flow import max_sequence_len
+from paddle.fluid.layers.control_flow import lod_tensor_to_array
+from paddle.fluid.layers.control_flow import array_to_lod_tensor


 class TestCPULoDTensorArrayOps(unittest.TestCase):
     def place(self):
@@ -137,13 +142,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         with program_guard(program):
             x = layers.data(name='x', shape=[10])
             x.persistable = True
-            table = layers.lod_rank_table(x, level=level)
-            max_len = layers.max_sequence_len(table)
+            table = lod_rank_table(x, level=level)
+            max_len = max_sequence_len(table)
             max_len.persistable = True
-            array = layers.lod_tensor_to_array(x, table)
+            array = lod_tensor_to_array(x, table)
             array.persistable = True
-            result = layers.array_to_lod_tensor(array, table)
+            result = array_to_lod_tensor(array, table)
             result.persistable = True
         exe = Executor(place)
         scope = core.Scope()
@@ -181,9 +186,9 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase):
         with program_guard(program):
             x = layers.data(
                 name='x', shape=[1], dtype='float32', stop_gradient=False)
-            table = layers.lod_rank_table(x, level=0)
-            array = layers.lod_tensor_to_array(x, table)
-            result = layers.array_to_lod_tensor(array, table)
+            table = lod_rank_table(x, level=0)
+            array = lod_tensor_to_array(x, table)
+            result = array_to_lod_tensor(array, table)
             mean = layers.mean(result)
......
@@ -15,6 +15,7 @@
 import unittest
 import paddle.fluid as fluid
 import paddle.fluid.core as core
+from paddle.fluid.layers.control_flow import lod_rank_table
 import numpy
@@ -34,7 +35,7 @@ class TestReorderLoDTensor(unittest.TestCase):
         dat.stop_gradient = False
         rank_dat = fluid.layers.data(
             name=cls.data_desc[1][0], shape=cls.data_desc[1][1])
-        table = fluid.layers.lod_rank_table(rank_dat)
+        table = lod_rank_table(rank_dat)
         new_dat = fluid.layers.reorder_lod_tensor_by_rank(
             x=dat, rank_table=table)
         loss = fluid.layers.reduce_sum(new_dat)
......
@@ -21,6 +21,9 @@ from paddle.fluid.framework import default_main_program, switch_main_program
 from paddle.fluid.framework import Program
 import numpy as np
+from paddle.fluid.layers.control_flow import shrink_memory
+from paddle.fluid.layers.control_flow import lod_rank_table


 class TestShrinkRNNMemoryBase(unittest.TestCase):
     def setUp(self):
@@ -30,15 +33,15 @@ class TestShrinkRNNMemoryBase(unittest.TestCase):
         x.stop_gradient = False
         rank_table_tensor = layers.data(
             'rank_table_tensor', shape=[1], dtype='float32', lod_level=1)
-        table = layers.lod_rank_table(x=rank_table_tensor)
+        table = lod_rank_table(x=rank_table_tensor)
         i = layers.zeros(dtype='int64', shape=[1])
-        self.mem1 = layers.shrink_memory(x=x, i=i, table=table)
+        self.mem1 = shrink_memory(x=x, i=i, table=table)
         i = layers.increment(x=i)
         i.stop_gradient = True
-        self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table)
+        self.mem2 = shrink_memory(x=self.mem1, i=i, table=table)
         i = layers.increment(x=i)
         i.stop_gradient = True
-        self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table)
+        self.mem3 = shrink_memory(x=self.mem2, i=i, table=table)
         mem3_mean = layers.mean(self.mem3)
         append_backward(loss=mem3_mean)
         self.x_grad = self.main_program.global_block().var('x@GRAD')
......
@@ -19,6 +19,8 @@ import paddle.fluid.layers as layers
 from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.executor import Executor
 from paddle.fluid.backward import append_backward
+from paddle.fluid.layers.control_flow import split_lod_tensor
+from paddle.fluid.layers.control_flow import merge_lod_tensor


 class TestCPULoDTensorArrayOps(unittest.TestCase):
@@ -96,12 +98,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase):
         y = layers.data(name='y', shape=[1])
         y.persistable = True

-        out_true, out_false = layers.split_lod_tensor(
-            input=x, mask=y, level=level)
+        out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
         out_true.persistable = True
         out_false.persistable = True

-        out = layers.merge_lod_tensor(
+        out = merge_lod_tensor(
             in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
         out.persistable = True
@@ -142,9 +143,8 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase):
         level = 0

-        out_true, out_false = layers.split_lod_tensor(
-            input=x, mask=y, level=level)
-        out = layers.merge_lod_tensor(
+        out_true, out_false = split_lod_tensor(input=x, mask=y, level=level)
+        out = merge_lod_tensor(
             in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
         mean = layers.mean(out)
......
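The same import shift applies to the split/merge pair exercised in the last file; a minimal sketch mirroring the updated test (names are illustrative; the mask is fed boolean values at run time, as in the test):

import paddle.fluid.layers as layers
from paddle.fluid.layers.control_flow import split_lod_tensor, merge_lod_tensor

x = layers.data(name='x', shape=[1])
y = layers.data(name='y', shape=[1])   # per-sequence mask
out_true, out_false = split_lod_tensor(input=x, mask=y, level=0)   # route sequences by mask
out = merge_lod_tensor(in_true=out_true, in_false=out_false, mask=y, x=x, level=0)  # reassemble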